author    Mark Benvenuto <mark.benvenuto@mongodb.com>  2015-06-20 00:22:50 -0400
committer Mark Benvenuto <mark.benvenuto@mongodb.com>  2015-06-20 10:56:02 -0400
commit    9c2ed42daa8fbbef4a919c21ec564e2db55e8d60 (patch)
tree      3814f79c10d7b490948d8cb7b112ac1dd41ceff1 /src/mongo/db/commands
parent    01965cf52bce6976637ecb8f4a622aeb05ab256a (diff)
download  mongo-9c2ed42daa8fbbef4a919c21ec564e2db55e8d60.tar.gz
SERVER-18579: Clang-Format - reformat code, no comment reflow
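The new layout seen throughout this diff (4-space indentation, namespace bodies and access specifiers at column zero, a roughly 100-column limit, and short getters expanded onto multiple lines) corresponds to a clang-format configuration along the lines of the sketch below. This is an approximation inferred from the hunks, not necessarily the exact .clang-format the repository shipped, and option names are as in current clang-format.

    # Sketch of a .clang-format approximating the style in this diff
    # (assumed; the repository's actual config may differ):
    BasedOnStyle: Google
    IndentWidth: 4
    ColumnLimit: 100
    AccessModifierOffset: -4                 # puts 'public:'/'private:' at column zero
    NamespaceIndentation: None               # namespace bodies stay unindented
    AllowShortFunctionsOnASingleLine: None   # expands one-line getters, as in the hunks
    # ReflowComments: false                  # would match "no comment reflow" in the
    #                                        # title; available in newer clang-format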
Diffstat (limited to 'src/mongo/db/commands')
-rw-r--r--  src/mongo/db/commands/apply_ops.cpp | 177
-rw-r--r--  src/mongo/db/commands/authentication_commands.cpp | 604
-rw-r--r--  src/mongo/db/commands/authentication_commands.h | 89
-rw-r--r--  src/mongo/db/commands/cleanup_orphaned_cmd.cpp | 443
-rw-r--r--  src/mongo/db/commands/clone.cpp | 151
-rw-r--r--  src/mongo/db/commands/clone_collection.cpp | 171
-rw-r--r--  src/mongo/db/commands/collection_to_capped.cpp | 216
-rw-r--r--  src/mongo/db/commands/compact.cpp | 204
-rw-r--r--  src/mongo/db/commands/connection_status.cpp | 169
-rw-r--r--  src/mongo/db/commands/copydb.cpp | 326
-rw-r--r--  src/mongo/db/commands/copydb.h | 14
-rw-r--r--  src/mongo/db/commands/copydb_common.cpp | 92
-rw-r--r--  src/mongo/db/commands/copydb_start_commands.cpp | 292
-rw-r--r--  src/mongo/db/commands/copydb_start_commands.h | 10
-rw-r--r--  src/mongo/db/commands/count_cmd.cpp | 229
-rw-r--r--  src/mongo/db/commands/cpuprofile.cpp | 205
-rw-r--r--  src/mongo/db/commands/create_indexes.cpp | 414
-rw-r--r--  src/mongo/db/commands/current_op.cpp | 177
-rw-r--r--  src/mongo/db/commands/dbhash.cpp | 331
-rw-r--r--  src/mongo/db/commands/dbhash.h | 78
-rw-r--r--  src/mongo/db/commands/distinct.cpp | 235
-rw-r--r--  src/mongo/db/commands/drop_indexes.cpp | 246
-rw-r--r--  src/mongo/db/commands/explain_cmd.cpp | 142
-rw-r--r--  src/mongo/db/commands/explain_cmd.h | 96
-rw-r--r--  src/mongo/db/commands/fail_point_cmd.cpp | 228
-rw-r--r--  src/mongo/db/commands/find_and_modify.cpp | 671
-rw-r--r--  src/mongo/db/commands/find_and_modify.h | 16
-rw-r--r--  src/mongo/db/commands/find_and_modify_common.cpp | 59
-rw-r--r--  src/mongo/db/commands/find_cmd.cpp | 541
-rw-r--r--  src/mongo/db/commands/fsync.cpp | 395
-rw-r--r--  src/mongo/db/commands/fsync.h | 6
-rw-r--r--  src/mongo/db/commands/geo_near_cmd.cpp | 449
-rw-r--r--  src/mongo/db/commands/get_last_error.cpp | 467
-rw-r--r--  src/mongo/db/commands/getmore_cmd.cpp | 590
-rw-r--r--  src/mongo/db/commands/group.cpp | 280
-rw-r--r--  src/mongo/db/commands/group.h | 118
-rw-r--r--  src/mongo/db/commands/hashcmd.cpp | 106
-rw-r--r--  src/mongo/db/commands/index_filter_commands.cpp | 602
-rw-r--r--  src/mongo/db/commands/index_filter_commands.h | 266
-rw-r--r--  src/mongo/db/commands/index_filter_commands_test.cpp | 562
-rw-r--r--  src/mongo/db/commands/isself.cpp | 62
-rw-r--r--  src/mongo/db/commands/kill_op.cpp | 85
-rw-r--r--  src/mongo/db/commands/list_collections.cpp | 284
-rw-r--r--  src/mongo/db/commands/list_databases.cpp | 156
-rw-r--r--  src/mongo/db/commands/list_indexes.cpp | 272
-rw-r--r--  src/mongo/db/commands/merge_chunks_cmd.cpp | 244
-rw-r--r--  src/mongo/db/commands/mr.cpp | 2894
-rw-r--r--  src/mongo/db/commands/mr.h | 689
-rw-r--r--  src/mongo/db/commands/mr_common.cpp | 185
-rw-r--r--  src/mongo/db/commands/mr_test.cpp | 266
-rw-r--r--  src/mongo/db/commands/oplog_note.cpp | 92
-rw-r--r--  src/mongo/db/commands/parallel_collection_scan.cpp | 211
-rw-r--r--  src/mongo/db/commands/parameters.cpp | 1007
-rw-r--r--  src/mongo/db/commands/pipeline_command.cpp | 509
-rw-r--r--  src/mongo/db/commands/plan_cache_commands.cpp | 656
-rw-r--r--  src/mongo/db/commands/plan_cache_commands.h | 271
-rw-r--r--  src/mongo/db/commands/plan_cache_commands_test.cpp | 689
-rw-r--r--  src/mongo/db/commands/rename_collection.cpp | 159
-rw-r--r--  src/mongo/db/commands/rename_collection.h | 14
-rw-r--r--  src/mongo/db/commands/rename_collection_common.cpp | 100
-rw-r--r--  src/mongo/db/commands/repair_cursor.cpp | 144
-rw-r--r--  src/mongo/db/commands/server_status.cpp | 462
-rw-r--r--  src/mongo/db/commands/server_status.h | 99
-rw-r--r--  src/mongo/db/commands/server_status_internal.cpp | 78
-rw-r--r--  src/mongo/db/commands/server_status_internal.h | 23
-rw-r--r--  src/mongo/db/commands/server_status_metric.cpp | 31
-rw-r--r--  src/mongo/db/commands/server_status_metric.h | 93
-rw-r--r--  src/mongo/db/commands/shutdown.h | 41
-rw-r--r--  src/mongo/db/commands/test_commands.cpp | 366
-rw-r--r--  src/mongo/db/commands/top_command.cpp | 90
-rw-r--r--  src/mongo/db/commands/touch.cpp | 105
-rw-r--r--  src/mongo/db/commands/user_management_commands.cpp | 4666
-rw-r--r--  src/mongo/db/commands/user_management_commands.h | 153
-rw-r--r--  src/mongo/db/commands/user_management_commands_common.cpp | 851
-rw-r--r--  src/mongo/db/commands/validate.cpp | 142
-rw-r--r--  src/mongo/db/commands/write_commands/batch_executor.cpp | 2197
-rw-r--r--  src/mongo/db/commands/write_commands/batch_executor.h | 266
-rw-r--r--  src/mongo/db/commands/write_commands/write_commands.cpp | 447
-rw-r--r--  src/mongo/db/commands/write_commands/write_commands.h | 170
-rw-r--r--  src/mongo/db/commands/write_commands/write_commands_common.cpp | 87
-rw-r--r--  src/mongo/db/commands/write_commands/write_commands_common.h | 9
-rw-r--r--  src/mongo/db/commands/writeback_compatibility_shim.cpp | 160
82 files changed, 14889 insertions, 15103 deletions
diff --git a/src/mongo/db/commands/apply_ops.cpp b/src/mongo/db/commands/apply_ops.cpp
index 7ca79ba57e4..7381d61dffc 100644
--- a/src/mongo/db/commands/apply_ops.cpp
+++ b/src/mongo/db/commands/apply_ops.cpp
@@ -58,103 +58,106 @@
namespace mongo {
- using std::string;
- using std::stringstream;
+using std::string;
+using std::stringstream;
- class ApplyOpsCmd : public Command {
- public:
- virtual bool slaveOk() const { return false; }
- virtual bool isWriteCommandForConfigServer() const { return true; }
+class ApplyOpsCmd : public Command {
+public:
+ virtual bool slaveOk() const {
+ return false;
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return true;
+ }
- ApplyOpsCmd() : Command( "applyOps" ) {}
- virtual void help( stringstream &help ) const {
- help << "internal (sharding)\n{ applyOps : [ ] , preCondition : [ { ns : ... , q : ... , res : ... } ] }";
- }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- // applyOps can do pretty much anything, so require all privileges.
- RoleGraph::generateUniversalPrivileges(out);
- }
- virtual bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- boost::optional<DisableDocumentValidation> maybeDisableValidation;
- if (shouldBypassDocumentValidationForCommand(cmdObj))
- maybeDisableValidation.emplace(txn);
+ ApplyOpsCmd() : Command("applyOps") {}
+ virtual void help(stringstream& help) const {
+ help << "internal (sharding)\n{ applyOps : [ ] , preCondition : [ { ns : ... , q : ... , "
+ "res : ... } ] }";
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ // applyOps can do pretty much anything, so require all privileges.
+ RoleGraph::generateUniversalPrivileges(out);
+ }
+ virtual bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ boost::optional<DisableDocumentValidation> maybeDisableValidation;
+ if (shouldBypassDocumentValidationForCommand(cmdObj))
+ maybeDisableValidation.emplace(txn);
- if ( cmdObj.firstElement().type() != Array ) {
- errmsg = "ops has to be an array";
- return false;
- }
+ if (cmdObj.firstElement().type() != Array) {
+ errmsg = "ops has to be an array";
+ return false;
+ }
- BSONObj ops = cmdObj.firstElement().Obj();
+ BSONObj ops = cmdObj.firstElement().Obj();
- {
- // check input
- BSONObjIterator i( ops );
- while ( i.more() ) {
- BSONElement e = i.next();
- if (!_checkOperation(e, errmsg)) {
- return false;
- }
+ {
+ // check input
+ BSONObjIterator i(ops);
+ while (i.more()) {
+ BSONElement e = i.next();
+ if (!_checkOperation(e, errmsg)) {
+ return false;
}
}
-
- return appendCommandStatus(result, applyOps(txn, dbname, cmdObj, &result));
}
- private:
- /**
- * Returns true if 'e' contains a valid operation.
- */
- bool _checkOperation(const BSONElement& e, string& errmsg) {
- if (e.type() != Object) {
- errmsg = str::stream() << "op not an object: " << e.fieldName();
- return false;
- }
- BSONObj obj = e.Obj();
- // op - operation type
- BSONElement opElement = obj.getField("op");
- if (opElement.eoo()) {
- errmsg = str::stream() << "op does not contain required \"op\" field: "
- << e.fieldName();
- return false;
- }
- if (opElement.type() != mongo::String) {
- errmsg = str::stream() << "\"op\" field is not a string: " << e.fieldName();
- return false;
- }
- // operation type -- see logOp() comments for types
- const char *opType = opElement.valuestrsafe();
- if (*opType == '\0') {
- errmsg = str::stream() << "\"op\" field value cannot be empty: " << e.fieldName();
- return false;
- }
+ return appendCommandStatus(result, applyOps(txn, dbname, cmdObj, &result));
+ }
- // ns - namespace
- // Only operations of type 'n' are allowed to have an empty namespace.
- BSONElement nsElement = obj.getField("ns");
- if (nsElement.eoo()) {
- errmsg = str::stream() << "op does not contain required \"ns\" field: "
- << e.fieldName();
- return false;
- }
- if (nsElement.type() != mongo::String) {
- errmsg = str::stream() << "\"ns\" field is not a string: " << e.fieldName();
- return false;
- }
- if (*opType != 'n' && nsElement.String().empty()) {
- errmsg = str::stream()
- << "\"ns\" field value cannot be empty when op type is not 'n': "
- << e.fieldName();
- return false;
- }
- return true;
+private:
+ /**
+ * Returns true if 'e' contains a valid operation.
+ */
+ bool _checkOperation(const BSONElement& e, string& errmsg) {
+ if (e.type() != Object) {
+ errmsg = str::stream() << "op not an object: " << e.fieldName();
+ return false;
+ }
+ BSONObj obj = e.Obj();
+ // op - operation type
+ BSONElement opElement = obj.getField("op");
+ if (opElement.eoo()) {
+ errmsg = str::stream()
+ << "op does not contain required \"op\" field: " << e.fieldName();
+ return false;
+ }
+ if (opElement.type() != mongo::String) {
+ errmsg = str::stream() << "\"op\" field is not a string: " << e.fieldName();
+ return false;
+ }
+ // operation type -- see logOp() comments for types
+ const char* opType = opElement.valuestrsafe();
+ if (*opType == '\0') {
+ errmsg = str::stream() << "\"op\" field value cannot be empty: " << e.fieldName();
+ return false;
}
- } applyOpsCmd;
+ // ns - namespace
+ // Only operations of type 'n' are allowed to have an empty namespace.
+ BSONElement nsElement = obj.getField("ns");
+ if (nsElement.eoo()) {
+ errmsg = str::stream()
+ << "op does not contain required \"ns\" field: " << e.fieldName();
+ return false;
+ }
+ if (nsElement.type() != mongo::String) {
+ errmsg = str::stream() << "\"ns\" field is not a string: " << e.fieldName();
+ return false;
+ }
+ if (*opType != 'n' && nsElement.String().empty()) {
+ errmsg = str::stream()
+ << "\"ns\" field value cannot be empty when op type is not 'n': " << e.fieldName();
+ return false;
+ }
+ return true;
+ }
+} applyOpsCmd;
}
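The _checkOperation helper above enforces three rules on every entry of the ops array: each entry must be an object, its "op" field must be a non-empty string, and its "ns" field must be a string that may be empty only when the op type is 'n'. Below is a minimal stand-alone restatement of those rules, using a plain struct in place of BSONElement; the struct and function names are hypothetical, for illustration only.

    #include <iostream>
    #include <optional>
    #include <string>

    // Hypothetical stand-in for one parsed entry of the applyOps 'ops' array.
    struct OpEntry {
        std::optional<std::string> op;  // operation type, e.g. "i", "u", "d", "n"
        std::optional<std::string> ns;  // target namespace
    };

    // Mirrors _checkOperation: returns an error message, or "" when the entry is valid.
    std::string checkOperation(const OpEntry& e) {
        if (!e.op)
            return "op does not contain required \"op\" field";
        if (e.op->empty())
            return "\"op\" field value cannot be empty";
        if (!e.ns)
            return "op does not contain required \"ns\" field";
        // Only operations of type 'n' may leave the namespace empty.
        if (*e.op != "n" && e.ns->empty())
            return "\"ns\" field value cannot be empty when op type is not 'n'";
        return "";
    }

    int main() {
        OpEntry noop{"n", ""};  // accepted: a no-op may have an empty ns
        OpEntry bad{"i", ""};   // rejected: an insert needs a namespace
        std::cout << "noop: '" << checkOperation(noop) << "'\n"
                  << "bad:  '" << checkOperation(bad) << "'\n";
    }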
diff --git a/src/mongo/db/commands/authentication_commands.cpp b/src/mongo/db/commands/authentication_commands.cpp
index 698eb2fd406..1b499f1d31d 100644
--- a/src/mongo/db/commands/authentication_commands.cpp
+++ b/src/mongo/db/commands/authentication_commands.cpp
@@ -64,370 +64,362 @@
namespace mongo {
- using std::hex;
- using std::string;
- using std::stringstream;
-
- static bool _isCRAuthDisabled;
- static bool _isX509AuthDisabled;
- static const char _nonceAuthenticationDisabledMessage[] =
- "Challenge-response authentication using getnonce and authenticate commands is disabled.";
- static const char _x509AuthenticationDisabledMessage[] =
- "x.509 authentication is disabled.";
-
- void CmdAuthenticate::disableAuthMechanism(std::string authMechanism) {
- if (authMechanism == "MONGODB-CR") {
- _isCRAuthDisabled = true;
- }
- if (authMechanism == "MONGODB-X509") {
- _isX509AuthDisabled = true;
- }
+using std::hex;
+using std::string;
+using std::stringstream;
+
+static bool _isCRAuthDisabled;
+static bool _isX509AuthDisabled;
+static const char _nonceAuthenticationDisabledMessage[] =
+ "Challenge-response authentication using getnonce and authenticate commands is disabled.";
+static const char _x509AuthenticationDisabledMessage[] = "x.509 authentication is disabled.";
+
+void CmdAuthenticate::disableAuthMechanism(std::string authMechanism) {
+ if (authMechanism == "MONGODB-CR") {
+ _isCRAuthDisabled = true;
}
+ if (authMechanism == "MONGODB-X509") {
+ _isX509AuthDisabled = true;
+ }
+}
- /* authentication
-
- system.users contains
- { user : <username>, pwd : <pwd_digest>, ... }
+/* authentication
- getnonce sends nonce to client
+ system.users contains
+ { user : <username>, pwd : <pwd_digest>, ... }
- client then sends { authenticate:1, nonce64:<nonce_str>, user:<username>, key:<key> }
+ getnonce sends nonce to client
- where <key> is md5(<nonce_str><username><pwd_digest_str>) as a string
- */
+ client then sends { authenticate:1, nonce64:<nonce_str>, user:<username>, key:<key> }
- class CmdGetNonce : public Command {
- public:
- CmdGetNonce() :
- Command("getnonce"),
- _random(SecureRandom::create()) {
- }
+ where <key> is md5(<nonce_str><username><pwd_digest_str>) as a string
+*/
- virtual bool slaveOk() const {
- return true;
- }
- void help(stringstream& h) const { h << "internal"; }
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {} // No auth required
- bool run(OperationContext* txn,
- const string&,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- nonce64 n = getNextNonce();
- stringstream ss;
- ss << hex << n;
- result.append("nonce", ss.str() );
- AuthenticationSession::set(
- ClientBasic::getCurrent(),
- stdx::make_unique<MongoAuthenticationSession>(n));
- return true;
- }
+class CmdGetNonce : public Command {
+public:
+ CmdGetNonce() : Command("getnonce"), _random(SecureRandom::create()) {}
- private:
- nonce64 getNextNonce() {
- stdx::lock_guard<SimpleMutex> lk(_randMutex);
- return _random->nextInt64();
- }
+ virtual bool slaveOk() const {
+ return true;
+ }
+ void help(stringstream& h) const {
+ h << "internal";
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {} // No auth required
+ bool run(OperationContext* txn,
+ const string&,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ nonce64 n = getNextNonce();
+ stringstream ss;
+ ss << hex << n;
+ result.append("nonce", ss.str());
+ AuthenticationSession::set(ClientBasic::getCurrent(),
+ stdx::make_unique<MongoAuthenticationSession>(n));
+ return true;
+ }
- SimpleMutex _randMutex; // Synchronizes accesses to _random.
- std::unique_ptr<SecureRandom> _random;
- } cmdGetNonce;
+private:
+ nonce64 getNextNonce() {
+ stdx::lock_guard<SimpleMutex> lk(_randMutex);
+ return _random->nextInt64();
+ }
- void CmdAuthenticate::redactForLogging(mutablebson::Document* cmdObj) {
- namespace mmb = mutablebson;
- static const int numRedactedFields = 2;
- static const char* redactedFields[numRedactedFields] = { "key", "nonce" };
- for (int i = 0; i < numRedactedFields; ++i) {
- for (mmb::Element element = mmb::findFirstChildNamed(cmdObj->root(), redactedFields[i]);
- element.ok();
- element = mmb::findElementNamed(element.rightSibling(), redactedFields[i])) {
+ SimpleMutex _randMutex; // Synchronizes accesses to _random.
+ std::unique_ptr<SecureRandom> _random;
+} cmdGetNonce;
- element.setValueString("xxx");
- }
+void CmdAuthenticate::redactForLogging(mutablebson::Document* cmdObj) {
+ namespace mmb = mutablebson;
+ static const int numRedactedFields = 2;
+ static const char* redactedFields[numRedactedFields] = {"key", "nonce"};
+ for (int i = 0; i < numRedactedFields; ++i) {
+ for (mmb::Element element = mmb::findFirstChildNamed(cmdObj->root(), redactedFields[i]);
+ element.ok();
+ element = mmb::findElementNamed(element.rightSibling(), redactedFields[i])) {
+ element.setValueString("xxx");
}
}
+}
- bool CmdAuthenticate::run(OperationContext* txn, const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
-
- if (!serverGlobalParams.quiet) {
- mutablebson::Document cmdToLog(cmdObj, mutablebson::Document::kInPlaceDisabled);
- redactForLogging(&cmdToLog);
- log() << " authenticate db: " << dbname << " " << cmdToLog;
- }
+bool CmdAuthenticate::run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ if (!serverGlobalParams.quiet) {
+ mutablebson::Document cmdToLog(cmdObj, mutablebson::Document::kInPlaceDisabled);
+ redactForLogging(&cmdToLog);
+ log() << " authenticate db: " << dbname << " " << cmdToLog;
+ }
- UserName user(cmdObj.getStringField("user"), dbname);
- if (Command::testCommandsEnabled &&
- user.getDB() == "admin" &&
- user.getUser() == internalSecurity.user->getName().getUser()) {
- // Allows authenticating as the internal user against the admin database. This is to
- // support the auth passthrough test framework on mongos (since you can't use the local
- // database on a mongos, so you can't auth as the internal user without this).
- user = internalSecurity.user->getName();
- }
+ UserName user(cmdObj.getStringField("user"), dbname);
+ if (Command::testCommandsEnabled && user.getDB() == "admin" &&
+ user.getUser() == internalSecurity.user->getName().getUser()) {
+ // Allows authenticating as the internal user against the admin database. This is to
+ // support the auth passthrough test framework on mongos (since you can't use the local
+ // database on a mongos, so you can't auth as the internal user without this).
+ user = internalSecurity.user->getName();
+ }
- std::string mechanism = cmdObj.getStringField("mechanism");
- if (mechanism.empty()) {
- mechanism = "MONGODB-CR";
+ std::string mechanism = cmdObj.getStringField("mechanism");
+ if (mechanism.empty()) {
+ mechanism = "MONGODB-CR";
+ }
+ Status status = _authenticate(txn, mechanism, user, cmdObj);
+ audit::logAuthentication(ClientBasic::getCurrent(), mechanism, user, status.code());
+ if (!status.isOK()) {
+ if (!serverGlobalParams.quiet) {
+ log() << "Failed to authenticate " << user << " with mechanism " << mechanism << ": "
+ << status;
}
- Status status = _authenticate(txn, mechanism, user, cmdObj);
- audit::logAuthentication(ClientBasic::getCurrent(),
- mechanism,
- user,
- status.code());
- if (!status.isOK()) {
- if (!serverGlobalParams.quiet) {
- log() << "Failed to authenticate " << user << " with mechanism " << mechanism
- << ": " << status;
- }
- if (status.code() == ErrorCodes::AuthenticationFailed) {
- // Statuses with code AuthenticationFailed may contain messages we do not wish to
- // reveal to the user, so we return a status with the message "auth failed".
- appendCommandStatus(result,
- Status(ErrorCodes::AuthenticationFailed, "auth failed"));
- }
- else {
- appendCommandStatus(result, status);
- }
- return false;
+ if (status.code() == ErrorCodes::AuthenticationFailed) {
+ // Statuses with code AuthenticationFailed may contain messages we do not wish to
+ // reveal to the user, so we return a status with the message "auth failed".
+ appendCommandStatus(result, Status(ErrorCodes::AuthenticationFailed, "auth failed"));
+ } else {
+ appendCommandStatus(result, status);
}
- result.append("dbname", user.getDB());
- result.append("user", user.getUser());
- return true;
+ return false;
}
+ result.append("dbname", user.getDB());
+ result.append("user", user.getUser());
+ return true;
+}
- Status CmdAuthenticate::_authenticate(OperationContext* txn,
- const std::string& mechanism,
- const UserName& user,
- const BSONObj& cmdObj) {
-
- if (mechanism == "MONGODB-CR") {
- return _authenticateCR(txn, user, cmdObj);
- }
+Status CmdAuthenticate::_authenticate(OperationContext* txn,
+ const std::string& mechanism,
+ const UserName& user,
+ const BSONObj& cmdObj) {
+ if (mechanism == "MONGODB-CR") {
+ return _authenticateCR(txn, user, cmdObj);
+ }
#ifdef MONGO_CONFIG_SSL
- if (mechanism == "MONGODB-X509") {
- return _authenticateX509(txn, user, cmdObj);
- }
-#endif
- return Status(ErrorCodes::BadValue, "Unsupported mechanism: " + mechanism);
+ if (mechanism == "MONGODB-X509") {
+ return _authenticateX509(txn, user, cmdObj);
}
+#endif
+ return Status(ErrorCodes::BadValue, "Unsupported mechanism: " + mechanism);
+}
- Status CmdAuthenticate::_authenticateCR(
- OperationContext* txn, const UserName& user, const BSONObj& cmdObj) {
-
- if (user == internalSecurity.user->getName() &&
- serverGlobalParams.clusterAuthMode.load() ==
- ServerGlobalParams::ClusterAuthMode_x509) {
- return Status(ErrorCodes::AuthenticationFailed,
- "Mechanism x509 is required for internal cluster authentication");
- }
+Status CmdAuthenticate::_authenticateCR(OperationContext* txn,
+ const UserName& user,
+ const BSONObj& cmdObj) {
+ if (user == internalSecurity.user->getName() &&
+ serverGlobalParams.clusterAuthMode.load() == ServerGlobalParams::ClusterAuthMode_x509) {
+ return Status(ErrorCodes::AuthenticationFailed,
+ "Mechanism x509 is required for internal cluster authentication");
+ }
- if (_isCRAuthDisabled) {
- // SERVER-8461, MONGODB-CR must be enabled for authenticating the internal user, so that
- // cluster members may communicate with each other.
- if (user != internalSecurity.user->getName()) {
- return Status(ErrorCodes::BadValue, _nonceAuthenticationDisabledMessage);
- }
+ if (_isCRAuthDisabled) {
+ // SERVER-8461, MONGODB-CR must be enabled for authenticating the internal user, so that
+ // cluster members may communicate with each other.
+ if (user != internalSecurity.user->getName()) {
+ return Status(ErrorCodes::BadValue, _nonceAuthenticationDisabledMessage);
}
+ }
- string key = cmdObj.getStringField("key");
- string received_nonce = cmdObj.getStringField("nonce");
-
- if( user.getUser().empty() || key.empty() || received_nonce.empty() ) {
- sleepmillis(10);
- return Status(ErrorCodes::ProtocolError,
- "field missing/wrong type in received authenticate command");
- }
+ string key = cmdObj.getStringField("key");
+ string received_nonce = cmdObj.getStringField("nonce");
- stringstream digestBuilder;
+ if (user.getUser().empty() || key.empty() || received_nonce.empty()) {
+ sleepmillis(10);
+ return Status(ErrorCodes::ProtocolError,
+ "field missing/wrong type in received authenticate command");
+ }
- {
- ClientBasic *client = ClientBasic::getCurrent();
- std::unique_ptr<AuthenticationSession> session;
- AuthenticationSession::swap(client, session);
- if (!session || session->getType() != AuthenticationSession::SESSION_TYPE_MONGO) {
+ stringstream digestBuilder;
+
+ {
+ ClientBasic* client = ClientBasic::getCurrent();
+ std::unique_ptr<AuthenticationSession> session;
+ AuthenticationSession::swap(client, session);
+ if (!session || session->getType() != AuthenticationSession::SESSION_TYPE_MONGO) {
+ sleepmillis(30);
+ return Status(ErrorCodes::ProtocolError, "No pending nonce");
+ } else {
+ nonce64 nonce = static_cast<MongoAuthenticationSession*>(session.get())->getNonce();
+ digestBuilder << hex << nonce;
+ if (digestBuilder.str() != received_nonce) {
sleepmillis(30);
- return Status(ErrorCodes::ProtocolError, "No pending nonce");
- }
- else {
- nonce64 nonce = static_cast<MongoAuthenticationSession*>(session.get())->getNonce();
- digestBuilder << hex << nonce;
- if (digestBuilder.str() != received_nonce) {
- sleepmillis(30);
- return Status(ErrorCodes::AuthenticationFailed, "Received wrong nonce.");
- }
+ return Status(ErrorCodes::AuthenticationFailed, "Received wrong nonce.");
}
}
+ }
- User* userObj;
- Status status = getGlobalAuthorizationManager()->acquireUser(txn, user, &userObj);
- if (!status.isOK()) {
- // Failure to find the privilege document indicates no-such-user, a fact that we do not
- // wish to reveal to the client. So, we return AuthenticationFailed rather than passing
- // through the returned status.
- return Status(ErrorCodes::AuthenticationFailed, status.toString());
- }
- string pwd = userObj->getCredentials().password;
- getGlobalAuthorizationManager()->releaseUser(userObj);
-
- if (pwd.empty()) {
- return Status(ErrorCodes::AuthenticationFailed,
- "MONGODB-CR credentials missing in the user document");
- }
+ User* userObj;
+ Status status = getGlobalAuthorizationManager()->acquireUser(txn, user, &userObj);
+ if (!status.isOK()) {
+ // Failure to find the privilege document indicates no-such-user, a fact that we do not
+ // wish to reveal to the client. So, we return AuthenticationFailed rather than passing
+ // through the returned status.
+ return Status(ErrorCodes::AuthenticationFailed, status.toString());
+ }
+ string pwd = userObj->getCredentials().password;
+ getGlobalAuthorizationManager()->releaseUser(userObj);
- md5digest d;
- {
- digestBuilder << user.getUser() << pwd;
- string done = digestBuilder.str();
+ if (pwd.empty()) {
+ return Status(ErrorCodes::AuthenticationFailed,
+ "MONGODB-CR credentials missing in the user document");
+ }
- md5_state_t st;
- md5_init(&st);
- md5_append(&st, (const md5_byte_t *) done.c_str(), done.size());
- md5_finish(&st, d);
- }
+ md5digest d;
+ {
+ digestBuilder << user.getUser() << pwd;
+ string done = digestBuilder.str();
- string computed = digestToString( d );
+ md5_state_t st;
+ md5_init(&st);
+ md5_append(&st, (const md5_byte_t*)done.c_str(), done.size());
+ md5_finish(&st, d);
+ }
- if ( key != computed ) {
- return Status(ErrorCodes::AuthenticationFailed, "key mismatch");
- }
+ string computed = digestToString(d);
- AuthorizationSession* authorizationSession =
- AuthorizationSession::get(ClientBasic::getCurrent());
- status = authorizationSession->addAndAuthorizeUser(txn, user);
- if (!status.isOK()) {
- return status;
- }
+ if (key != computed) {
+ return Status(ErrorCodes::AuthenticationFailed, "key mismatch");
+ }
- return Status::OK();
+ AuthorizationSession* authorizationSession =
+ AuthorizationSession::get(ClientBasic::getCurrent());
+ status = authorizationSession->addAndAuthorizeUser(txn, user);
+ if (!status.isOK()) {
+ return status;
}
+ return Status::OK();
+}
+
#ifdef MONGO_CONFIG_SSL
- void canonicalizeClusterDN(std::vector<std::string>* dn) {
- // remove all RDNs we don't care about
- for (size_t i=0; i<dn->size(); i++) {
- std::string& comp = dn->at(i);
- boost::algorithm::trim(comp);
- if (!mongoutils::str::startsWith(comp.c_str(), "DC=") &&
- !mongoutils::str::startsWith(comp.c_str(), "O=") &&
- !mongoutils::str::startsWith(comp.c_str(), "OU=")) {
- dn->erase(dn->begin()+i);
- i--;
- }
+void canonicalizeClusterDN(std::vector<std::string>* dn) {
+ // remove all RDNs we don't care about
+ for (size_t i = 0; i < dn->size(); i++) {
+ std::string& comp = dn->at(i);
+ boost::algorithm::trim(comp);
+ if (!mongoutils::str::startsWith(comp.c_str(), "DC=") &&
+ !mongoutils::str::startsWith(comp.c_str(), "O=") &&
+ !mongoutils::str::startsWith(comp.c_str(), "OU=")) {
+ dn->erase(dn->begin() + i);
+ i--;
}
- std::stable_sort(dn->begin(), dn->end());
}
+ std::stable_sort(dn->begin(), dn->end());
+}
- bool CmdAuthenticate::_clusterIdMatch(const std::string& subjectName,
- const std::string& srvSubjectName) {
- std::vector<string> clientRDN = StringSplitter::split(subjectName, ",");
- std::vector<string> serverRDN = StringSplitter::split(srvSubjectName, ",");
+bool CmdAuthenticate::_clusterIdMatch(const std::string& subjectName,
+ const std::string& srvSubjectName) {
+ std::vector<string> clientRDN = StringSplitter::split(subjectName, ",");
+ std::vector<string> serverRDN = StringSplitter::split(srvSubjectName, ",");
- canonicalizeClusterDN(&clientRDN);
- canonicalizeClusterDN(&serverRDN);
+ canonicalizeClusterDN(&clientRDN);
+ canonicalizeClusterDN(&serverRDN);
- if (clientRDN.size() == 0 || clientRDN.size() != serverRDN.size()) {
- return false;
- }
+ if (clientRDN.size() == 0 || clientRDN.size() != serverRDN.size()) {
+ return false;
+ }
- for (size_t i=0; i < serverRDN.size(); i++) {
- if(clientRDN[i] != serverRDN[i]) {
- return false;
- }
+ for (size_t i = 0; i < serverRDN.size(); i++) {
+ if (clientRDN[i] != serverRDN[i]) {
+ return false;
}
- return true;
}
-
- Status CmdAuthenticate::_authenticateX509(
- OperationContext* txn, const UserName& user, const BSONObj& cmdObj) {
- if (!getSSLManager()) {
- return Status(ErrorCodes::ProtocolError,
- "SSL support is required for the MONGODB-X509 mechanism.");
- }
- if(user.getDB() != "$external") {
- return Status(ErrorCodes::ProtocolError,
- "X.509 authentication must always use the $external database.");
- }
+ return true;
+}
- ClientBasic *client = ClientBasic::getCurrent();
- AuthorizationSession* authorizationSession = AuthorizationSession::get(client);
- std::string subjectName = client->port()->getX509SubjectName();
+Status CmdAuthenticate::_authenticateX509(OperationContext* txn,
+ const UserName& user,
+ const BSONObj& cmdObj) {
+ if (!getSSLManager()) {
+ return Status(ErrorCodes::ProtocolError,
+ "SSL support is required for the MONGODB-X509 mechanism.");
+ }
+ if (user.getDB() != "$external") {
+ return Status(ErrorCodes::ProtocolError,
+ "X.509 authentication must always use the $external database.");
+ }
- if (!getSSLManager()->getSSLConfiguration().hasCA) {
- return Status(ErrorCodes::AuthenticationFailed,
- "Unable to verify x.509 certificate, as no CA has been provided.");
- }
- else if (user.getUser() != subjectName) {
- return Status(ErrorCodes::AuthenticationFailed,
- "There is no x.509 client certificate matching the user.");
+ ClientBasic* client = ClientBasic::getCurrent();
+ AuthorizationSession* authorizationSession = AuthorizationSession::get(client);
+ std::string subjectName = client->port()->getX509SubjectName();
+
+ if (!getSSLManager()->getSSLConfiguration().hasCA) {
+ return Status(ErrorCodes::AuthenticationFailed,
+ "Unable to verify x.509 certificate, as no CA has been provided.");
+ } else if (user.getUser() != subjectName) {
+ return Status(ErrorCodes::AuthenticationFailed,
+ "There is no x.509 client certificate matching the user.");
+ } else {
+ std::string srvSubjectName = getSSLManager()->getSSLConfiguration().serverSubjectName;
+
+ // Handle internal cluster member auth, only applies to server-server connections
+ if (_clusterIdMatch(subjectName, srvSubjectName)) {
+ int clusterAuthMode = serverGlobalParams.clusterAuthMode.load();
+ if (clusterAuthMode == ServerGlobalParams::ClusterAuthMode_undefined ||
+ clusterAuthMode == ServerGlobalParams::ClusterAuthMode_keyFile) {
+ return Status(ErrorCodes::AuthenticationFailed,
+ "The provided certificate "
+ "can only be used for cluster authentication, not client "
+ "authentication. The current configuration does not allow "
+ "x.509 cluster authentication, check the --clusterAuthMode flag");
+ }
+ authorizationSession->grantInternalAuthorization();
}
+ // Handle normal client authentication, only applies to client-server connections
else {
- std::string srvSubjectName = getSSLManager()->getSSLConfiguration().serverSubjectName;
-
- // Handle internal cluster member auth, only applies to server-server connections
- if (_clusterIdMatch(subjectName, srvSubjectName)) {
- int clusterAuthMode = serverGlobalParams.clusterAuthMode.load();
- if (clusterAuthMode == ServerGlobalParams::ClusterAuthMode_undefined ||
- clusterAuthMode == ServerGlobalParams::ClusterAuthMode_keyFile) {
- return Status(ErrorCodes::AuthenticationFailed, "The provided certificate "
- "can only be used for cluster authentication, not client "
- "authentication. The current configuration does not allow "
- "x.509 cluster authentication, check the --clusterAuthMode flag");
- }
- authorizationSession->grantInternalAuthorization();
+ if (_isX509AuthDisabled) {
+ return Status(ErrorCodes::BadValue, _x509AuthenticationDisabledMessage);
}
- // Handle normal client authentication, only applies to client-server connections
- else {
- if (_isX509AuthDisabled) {
- return Status(ErrorCodes::BadValue,
- _x509AuthenticationDisabledMessage);
- }
- Status status = authorizationSession->addAndAuthorizeUser(txn, user);
- if (!status.isOK()) {
- return status;
- }
+ Status status = authorizationSession->addAndAuthorizeUser(txn, user);
+ if (!status.isOK()) {
+ return status;
}
- return Status::OK();
}
+ return Status::OK();
}
+}
#endif
- CmdAuthenticate cmdAuthenticate;
+CmdAuthenticate cmdAuthenticate;
- class CmdLogout : public Command {
- public:
- virtual bool slaveOk() const {
- return true;
- }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {} // No auth required
- void help(stringstream& h) const { h << "de-authenticate"; }
- virtual bool isWriteCommandForConfigServer() const { return false; }
- CmdLogout() : Command("logout") {}
- bool run(OperationContext* txn, const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
- AuthorizationSession* authSession =
- AuthorizationSession::get(ClientBasic::getCurrent());
- authSession->logoutDatabase(dbname);
- if (Command::testCommandsEnabled && dbname == "admin") {
- // Allows logging out as the internal user against the admin database, however
- // this actually logs out of the local database as well. This is to
- // support the auth passthrough test framework on mongos (since you can't use the
- // local database on a mongos, so you can't logout as the internal user
- // without this).
- authSession->logoutDatabase("local");
- }
- return true;
+class CmdLogout : public Command {
+public:
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {} // No auth required
+ void help(stringstream& h) const {
+ h << "de-authenticate";
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ CmdLogout() : Command("logout") {}
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ AuthorizationSession* authSession = AuthorizationSession::get(ClientBasic::getCurrent());
+ authSession->logoutDatabase(dbname);
+ if (Command::testCommandsEnabled && dbname == "admin") {
+ // Allows logging out as the internal user against the admin database, however
+ // this actually logs out of the local database as well. This is to
+ // support the auth passthrough test framework on mongos (since you can't use the
+ // local database on a mongos, so you can't logout as the internal user
+ // without this).
+ authSession->logoutDatabase("local");
}
- } cmdLogout;
+ return true;
+ }
+} cmdLogout;
}
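The getnonce/authenticate exchange implemented above turns on a single digest: _authenticateCR recomputes md5(<nonce_hex><username><pwd_digest>) and compares it against the client-supplied key, where pwd_digest is the stored MONGODB-CR credential (conventionally the hex MD5 of "<username>:mongo:<password>"; that convention comes from the MONGODB-CR mechanism at large, not from this diff). A self-contained sketch of the client-side computation using OpenSSL's MD5 follows.

    #include <openssl/md5.h>  // MD5 is deprecated in OpenSSL 3.x; fine for a sketch
    #include <cstdio>
    #include <string>

    // Hex-encode an MD5 digest of 'in', mirroring mongo's digestToString().
    static std::string md5hex(const std::string& in) {
        unsigned char d[MD5_DIGEST_LENGTH];
        MD5(reinterpret_cast<const unsigned char*>(in.data()), in.size(), d);
        char out[2 * MD5_DIGEST_LENGTH + 1];
        for (int i = 0; i < MD5_DIGEST_LENGTH; ++i)
            std::snprintf(out + 2 * i, 3, "%02x", d[i]);
        return std::string(out, 2 * MD5_DIGEST_LENGTH);
    }

    int main() {
        std::string user = "alice", password = "secret";
        std::string nonceHex = "1a2b3c4d5e6f7a8b";  // as returned by getnonce
        // Stored credential: assumed md5("<user>:mongo:<password>") per MONGODB-CR.
        std::string pwdDigest = md5hex(user + ":mongo:" + password);
        // Client key, matching the server's digestBuilder << hex << nonce << user << pwd.
        std::string key = md5hex(nonceHex + user + pwdDigest);
        std::printf("{ authenticate: 1, user: \"%s\", nonce: \"%s\", key: \"%s\" }\n",
                    user.c_str(), nonceHex.c_str(), key.c_str());
    }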
diff --git a/src/mongo/db/commands/authentication_commands.h b/src/mongo/db/commands/authentication_commands.h
index e22711454e2..67a41c18401 100644
--- a/src/mongo/db/commands/authentication_commands.h
+++ b/src/mongo/db/commands/authentication_commands.h
@@ -36,52 +36,53 @@
namespace mongo {
- class CmdAuthenticate : public Command {
- public:
- static void disableAuthMechanism(std::string authMechanism);
+class CmdAuthenticate : public Command {
+public:
+ static void disableAuthMechanism(std::string authMechanism);
- virtual bool slaveOk() const {
- return true;
- }
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual void help(std::stringstream& ss) const { ss << "internal"; }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {} // No auth required
- virtual void redactForLogging(mutablebson::Document* cmdObj);
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual void help(std::stringstream& ss) const {
+ ss << "internal";
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {} // No auth required
+ virtual void redactForLogging(mutablebson::Document* cmdObj);
- CmdAuthenticate() : Command("authenticate") {}
- bool run(OperationContext* txn, const std::string& dbname,
- BSONObj& cmdObj,
- int options,
- std::string& errmsg,
- BSONObjBuilder& result);
+ CmdAuthenticate() : Command("authenticate") {}
+ bool run(OperationContext* txn,
+ const std::string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result);
- private:
- /**
- * Completes the authentication of "user" using "mechanism" and parameters from "cmdObj".
- *
- * Returns Status::OK() on success. All other statuses indicate failed authentication. The
- * entire status returned here may always be used for logging. However, if the code is
- * AuthenticationFailed, the "reason" field of the return status may contain information
- * that should not be revealed to the connected client.
- *
- * Other than AuthenticationFailed, common returns are BadValue, indicating unsupported
- * mechanism, and ProtocolError, indicating an error in the use of the authentication
- * protocol.
- */
- Status _authenticate(OperationContext* txn,
- const std::string& mechanism,
- const UserName& user,
- const BSONObj& cmdObj);
- Status _authenticateCR(
- OperationContext* txn, const UserName& user, const BSONObj& cmdObj);
- Status _authenticateX509(
- OperationContext* txn, const UserName& user, const BSONObj& cmdObj);
- bool _clusterIdMatch(const std::string& subjectName, const std::string& srvSubjectName);
- };
+private:
+ /**
+ * Completes the authentication of "user" using "mechanism" and parameters from "cmdObj".
+ *
+ * Returns Status::OK() on success. All other statuses indicate failed authentication. The
+ * entire status returned here may always be used for logging. However, if the code is
+ * AuthenticationFailed, the "reason" field of the return status may contain information
+ * that should not be revealed to the connected client.
+ *
+ * Other than AuthenticationFailed, common returns are BadValue, indicating unsupported
+ * mechanism, and ProtocolError, indicating an error in the use of the authentication
+ * protocol.
+ */
+ Status _authenticate(OperationContext* txn,
+ const std::string& mechanism,
+ const UserName& user,
+ const BSONObj& cmdObj);
+ Status _authenticateCR(OperationContext* txn, const UserName& user, const BSONObj& cmdObj);
+ Status _authenticateX509(OperationContext* txn, const UserName& user, const BSONObj& cmdObj);
+ bool _clusterIdMatch(const std::string& subjectName, const std::string& srvSubjectName);
+};
- extern CmdAuthenticate cmdAuthenticate;
+extern CmdAuthenticate cmdAuthenticate;
}
-
-
diff --git a/src/mongo/db/commands/cleanup_orphaned_cmd.cpp b/src/mongo/db/commands/cleanup_orphaned_cmd.cpp
index a6ff2b90a6d..50666033aa6 100644
--- a/src/mongo/db/commands/cleanup_orphaned_cmd.cpp
+++ b/src/mongo/db/commands/cleanup_orphaned_cmd.cpp
@@ -50,277 +50,258 @@
#include "mongo/util/log.h"
namespace {
- using mongo::WriteConcernOptions;
+using mongo::WriteConcernOptions;
- const int kDefaultWTimeoutMs = 60 * 1000;
- const WriteConcernOptions DefaultWriteConcern(WriteConcernOptions::kMajority,
- WriteConcernOptions::NONE,
- kDefaultWTimeoutMs);
+const int kDefaultWTimeoutMs = 60 * 1000;
+const WriteConcernOptions DefaultWriteConcern(WriteConcernOptions::kMajority,
+ WriteConcernOptions::NONE,
+ kDefaultWTimeoutMs);
}
namespace mongo {
- using std::endl;
- using std::string;
-
- using mongoutils::str::stream;
-
- enum CleanupResult {
- CleanupResult_Done, CleanupResult_Continue, CleanupResult_Error
- };
-
- /**
- * Cleans up one range of orphaned data starting from a range that overlaps or starts at
- * 'startingFromKey'. If empty, startingFromKey is the minimum key of the sharded range.
- *
- * @return CleanupResult_Continue and 'stoppedAtKey' if orphaned range was found and cleaned
- * @return CleanupResult_Done if no orphaned ranges remain
- * @return CleanupResult_Error and 'errMsg' if an error occurred
- *
- * If the collection is not sharded, returns CleanupResult_Done.
- */
- CleanupResult cleanupOrphanedData( OperationContext* txn,
- const NamespaceString& ns,
- const BSONObj& startingFromKeyConst,
- const WriteConcernOptions& secondaryThrottle,
- BSONObj* stoppedAtKey,
- string* errMsg ) {
-
- BSONObj startingFromKey = startingFromKeyConst;
-
- CollectionMetadataPtr metadata = shardingState.getCollectionMetadata( ns.toString() );
- if ( !metadata || metadata->getKeyPattern().isEmpty() ) {
-
- warning() << "skipping orphaned data cleanup for " << ns.toString()
- << ", collection is not sharded" << endl;
-
- return CleanupResult_Done;
- }
+using std::endl;
+using std::string;
- BSONObj keyPattern = metadata->getKeyPattern();
- if ( !startingFromKey.isEmpty() ) {
- if ( !metadata->isValidKey( startingFromKey ) ) {
+using mongoutils::str::stream;
- *errMsg = stream() << "could not cleanup orphaned data, start key "
- << startingFromKey
- << " does not match shard key pattern " << keyPattern;
+enum CleanupResult { CleanupResult_Done, CleanupResult_Continue, CleanupResult_Error };
- warning() << *errMsg << endl;
- return CleanupResult_Error;
- }
- }
- else {
- startingFromKey = metadata->getMinKey();
- }
-
- KeyRange orphanRange;
- if ( !metadata->getNextOrphanRange( startingFromKey, &orphanRange ) ) {
+/**
+ * Cleans up one range of orphaned data starting from a range that overlaps or starts at
+ * 'startingFromKey'. If empty, startingFromKey is the minimum key of the sharded range.
+ *
+ * @return CleanupResult_Continue and 'stoppedAtKey' if orphaned range was found and cleaned
+ * @return CleanupResult_Done if no orphaned ranges remain
+ * @return CleanupResult_Error and 'errMsg' if an error occurred
+ *
+ * If the collection is not sharded, returns CleanupResult_Done.
+ */
+CleanupResult cleanupOrphanedData(OperationContext* txn,
+ const NamespaceString& ns,
+ const BSONObj& startingFromKeyConst,
+ const WriteConcernOptions& secondaryThrottle,
+ BSONObj* stoppedAtKey,
+ string* errMsg) {
+ BSONObj startingFromKey = startingFromKeyConst;
+
+ CollectionMetadataPtr metadata = shardingState.getCollectionMetadata(ns.toString());
+ if (!metadata || metadata->getKeyPattern().isEmpty()) {
+ warning() << "skipping orphaned data cleanup for " << ns.toString()
+ << ", collection is not sharded" << endl;
+
+ return CleanupResult_Done;
+ }
- LOG( 1 ) << "orphaned data cleanup requested for " << ns.toString()
- << " starting from " << startingFromKey
- << ", no orphan ranges remain" << endl;
+ BSONObj keyPattern = metadata->getKeyPattern();
+ if (!startingFromKey.isEmpty()) {
+ if (!metadata->isValidKey(startingFromKey)) {
+ *errMsg = stream() << "could not cleanup orphaned data, start key " << startingFromKey
+ << " does not match shard key pattern " << keyPattern;
- return CleanupResult_Done;
- }
- orphanRange.ns = ns;
- *stoppedAtKey = orphanRange.maxKey;
-
- // We're done with this metadata now, no matter what happens
- metadata.reset();
-
- LOG( 1 ) << "orphaned data cleanup requested for " << ns.toString()
- << " starting from " << startingFromKey
- << ", removing next orphan range"
- << " [" << orphanRange.minKey << "," << orphanRange.maxKey << ")"
- << endl;
-
- // Metadata snapshot may be stale now, but deleter checks metadata again in write lock
- // before delete.
- RangeDeleterOptions deleterOptions(orphanRange);
- deleterOptions.writeConcern = secondaryThrottle;
- deleterOptions.onlyRemoveOrphanedDocs = true;
- deleterOptions.fromMigrate = true;
- // Must wait for cursors since there can be existing cursors with an older
- // CollectionMetadata.
- deleterOptions.waitForOpenCursors = true;
- deleterOptions.removeSaverReason = "cleanup-cmd";
-
- if (!getDeleter()->deleteNow(txn, deleterOptions, errMsg)) {
warning() << *errMsg << endl;
return CleanupResult_Error;
}
-
- return CleanupResult_Continue;
+ } else {
+ startingFromKey = metadata->getMinKey();
}
- /**
- * Cleanup orphaned data command. Called on a particular namespace, and if the collection
- * is sharded will clean up a single orphaned data range which overlaps or starts after a
- * passed-in 'startingFromKey'. Returns true and a 'stoppedAtKey' (which will start a
- * search for the next orphaned range if the command is called again) or no key if there
- * are no more orphaned ranges in the collection.
- *
- * If the collection is not sharded, returns true but no 'stoppedAtKey'.
- * On failure, returns false and an error message.
- *
- * Calling this command repeatedly until no 'stoppedAtKey' is returned ensures that the
- * full collection range is searched for orphaned documents, but since sharding state may
- * change between calls there is no guarantee that all orphaned documents were found unless
- * the balancer is off.
- *
- * Safe to call with the balancer on.
- *
- * Format:
- *
- * {
- * cleanupOrphaned: <ns>,
- * // optional parameters:
- * startingAtKey: { <shardKeyValue> }, // defaults to lowest value
- * secondaryThrottle: <bool>, // defaults to true
- * // defaults to { w: "majority", wtimeout: 60000 }. Applies to individual writes.
- * writeConcern: { <writeConcern options> }
- * }
- */
- class CleanupOrphanedCommand : public Command {
- public:
- CleanupOrphanedCommand() :
- Command( "cleanupOrphaned" ) {}
-
- virtual bool slaveOk() const { return false; }
- virtual bool adminOnly() const { return true; }
- virtual bool localHostOnlyIfNoAuth( const BSONObj& cmdObj ) { return false; }
-
- virtual Status checkAuthForCommand( ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj ) {
- if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forClusterResource(), ActionType::cleanupOrphaned)) {
- return Status(ErrorCodes::Unauthorized,
- "Not authorized for cleanupOrphaned command.");
- }
- return Status::OK();
- }
+ KeyRange orphanRange;
+ if (!metadata->getNextOrphanRange(startingFromKey, &orphanRange)) {
+ LOG(1) << "orphaned data cleanup requested for " << ns.toString() << " starting from "
+ << startingFromKey << ", no orphan ranges remain" << endl;
- virtual bool isWriteCommandForConfigServer() const { return false; }
+ return CleanupResult_Done;
+ }
+ orphanRange.ns = ns;
+ *stoppedAtKey = orphanRange.maxKey;
+
+ // We're done with this metadata now, no matter what happens
+ metadata.reset();
+
+ LOG(1) << "orphaned data cleanup requested for " << ns.toString() << " starting from "
+ << startingFromKey << ", removing next orphan range"
+ << " [" << orphanRange.minKey << "," << orphanRange.maxKey << ")" << endl;
+
+ // Metadata snapshot may be stale now, but deleter checks metadata again in write lock
+ // before delete.
+ RangeDeleterOptions deleterOptions(orphanRange);
+ deleterOptions.writeConcern = secondaryThrottle;
+ deleterOptions.onlyRemoveOrphanedDocs = true;
+ deleterOptions.fromMigrate = true;
+ // Must wait for cursors since there can be existing cursors with an older
+ // CollectionMetadata.
+ deleterOptions.waitForOpenCursors = true;
+ deleterOptions.removeSaverReason = "cleanup-cmd";
+
+ if (!getDeleter()->deleteNow(txn, deleterOptions, errMsg)) {
+ warning() << *errMsg << endl;
+ return CleanupResult_Error;
+ }
- // Input
- static BSONField<string> nsField;
- static BSONField<BSONObj> startingFromKeyField;
+ return CleanupResult_Continue;
+}
- // Output
- static BSONField<BSONObj> stoppedAtKeyField;
+/**
+ * Cleanup orphaned data command. Called on a particular namespace, and if the collection
+ * is sharded will clean up a single orphaned data range which overlaps or starts after a
+ * passed-in 'startingFromKey'. Returns true and a 'stoppedAtKey' (which will start a
+ * search for the next orphaned range if the command is called again) or no key if there
+ * are no more orphaned ranges in the collection.
+ *
+ * If the collection is not sharded, returns true but no 'stoppedAtKey'.
+ * On failure, returns false and an error message.
+ *
+ * Calling this command repeatedly until no 'stoppedAtKey' is returned ensures that the
+ * full collection range is searched for orphaned documents, but since sharding state may
+ * change between calls there is no guarantee that all orphaned documents were found unless
+ * the balancer is off.
+ *
+ * Safe to call with the balancer on.
+ *
+ * Format:
+ *
+ * {
+ * cleanupOrphaned: <ns>,
+ * // optional parameters:
+ * startingAtKey: { <shardKeyValue> }, // defaults to lowest value
+ * secondaryThrottle: <bool>, // defaults to true
+ * // defaults to { w: "majority", wtimeout: 60000 }. Applies to individual writes.
+ * writeConcern: { <writeConcern options> }
+ * }
+ */
+class CleanupOrphanedCommand : public Command {
+public:
+ CleanupOrphanedCommand() : Command("cleanupOrphaned") {}
+
+ virtual bool slaveOk() const {
+ return false;
+ }
+ virtual bool adminOnly() const {
+ return true;
+ }
+ virtual bool localHostOnlyIfNoAuth(const BSONObj& cmdObj) {
+ return false;
+ }
- bool run( OperationContext* txn,
- string const &db,
- BSONObj &cmdObj,
- int,
- string &errmsg,
- BSONObjBuilder &result) {
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
+ ResourcePattern::forClusterResource(), ActionType::cleanupOrphaned)) {
+ return Status(ErrorCodes::Unauthorized, "Not authorized for cleanupOrphaned command.");
+ }
+ return Status::OK();
+ }
- string ns;
- if ( !FieldParser::extract( cmdObj, nsField, &ns, &errmsg ) ) {
- return false;
- }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
- if ( ns == "" ) {
- errmsg = "no collection name specified";
- return false;
- }
+ // Input
+ static BSONField<string> nsField;
+ static BSONField<BSONObj> startingFromKeyField;
+
+ // Output
+ static BSONField<BSONObj> stoppedAtKeyField;
+
+ bool run(OperationContext* txn,
+ string const& db,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ string ns;
+ if (!FieldParser::extract(cmdObj, nsField, &ns, &errmsg)) {
+ return false;
+ }
- BSONObj startingFromKey;
- if ( !FieldParser::extract( cmdObj,
- startingFromKeyField,
- &startingFromKey,
- &errmsg ) ) {
- return false;
- }
+ if (ns == "") {
+ errmsg = "no collection name specified";
+ return false;
+ }
- WriteConcernOptions writeConcern;
- Status status = writeConcern.parseSecondaryThrottle(cmdObj, NULL);
+ BSONObj startingFromKey;
+ if (!FieldParser::extract(cmdObj, startingFromKeyField, &startingFromKey, &errmsg)) {
+ return false;
+ }
- if (!status.isOK()){
- if (status.code() != ErrorCodes::WriteConcernNotDefined) {
- return appendCommandStatus(result, status);
- }
+ WriteConcernOptions writeConcern;
+ Status status = writeConcern.parseSecondaryThrottle(cmdObj, NULL);
- writeConcern = DefaultWriteConcern;
- }
- else {
- repl::ReplicationCoordinator* replCoordinator =
- repl::getGlobalReplicationCoordinator();
- Status status = replCoordinator->checkIfWriteConcernCanBeSatisfied(writeConcern);
-
- if (replCoordinator->getReplicationMode() ==
- repl::ReplicationCoordinator::modeMasterSlave &&
- writeConcern.shouldWaitForOtherNodes()) {
- warning() << "cleanupOrphaned cannot check if write concern setting "
- << writeConcern.toBSON()
- << " can be enforced in a master slave configuration";
- }
-
- if (!status.isOK() && status != ErrorCodes::NoReplicationEnabled) {
- return appendCommandStatus(result, status);
- }
+ if (!status.isOK()) {
+ if (status.code() != ErrorCodes::WriteConcernNotDefined) {
+ return appendCommandStatus(result, status);
}
- if (writeConcern.shouldWaitForOtherNodes() &&
- writeConcern.wTimeout == WriteConcernOptions::kNoTimeout) {
- // Don't allow no timeout.
- writeConcern.wTimeout = kDefaultWTimeoutMs;
+ writeConcern = DefaultWriteConcern;
+ } else {
+ repl::ReplicationCoordinator* replCoordinator = repl::getGlobalReplicationCoordinator();
+ Status status = replCoordinator->checkIfWriteConcernCanBeSatisfied(writeConcern);
+
+ if (replCoordinator->getReplicationMode() ==
+ repl::ReplicationCoordinator::modeMasterSlave &&
+ writeConcern.shouldWaitForOtherNodes()) {
+ warning() << "cleanupOrphaned cannot check if write concern setting "
+ << writeConcern.toBSON()
+ << " can be enforced in a master slave configuration";
}
- if (!shardingState.enabled()) {
- errmsg = str::stream() << "server is not part of a sharded cluster or "
- << "the sharding metadata is not yet initialized.";
- return false;
+ if (!status.isOK() && status != ErrorCodes::NoReplicationEnabled) {
+ return appendCommandStatus(result, status);
}
+ }
- ChunkVersion shardVersion;
- status = shardingState.refreshMetadataNow(txn, ns, &shardVersion);
- if ( !status.isOK() ) {
- if ( status.code() == ErrorCodes::RemoteChangeDetected ) {
- warning() << "Shard version in transition detected while refreshing "
- << "metadata for " << ns << " at version " << shardVersion << endl;
- }
- else {
- errmsg = str::stream() << "failed to refresh shard metadata: "
- << status.reason();
- return false;
- }
- }
+ if (writeConcern.shouldWaitForOtherNodes() &&
+ writeConcern.wTimeout == WriteConcernOptions::kNoTimeout) {
+ // Don't allow no timeout.
+ writeConcern.wTimeout = kDefaultWTimeoutMs;
+ }
- BSONObj stoppedAtKey;
- CleanupResult cleanupResult = cleanupOrphanedData( txn,
- NamespaceString( ns ),
- startingFromKey,
- writeConcern,
- &stoppedAtKey,
- &errmsg );
+ if (!shardingState.enabled()) {
+ errmsg = str::stream() << "server is not part of a sharded cluster or "
+ << "the sharding metadata is not yet initialized.";
+ return false;
+ }
- if ( cleanupResult == CleanupResult_Error ) {
+ ChunkVersion shardVersion;
+ status = shardingState.refreshMetadataNow(txn, ns, &shardVersion);
+ if (!status.isOK()) {
+ if (status.code() == ErrorCodes::RemoteChangeDetected) {
+ warning() << "Shard version in transition detected while refreshing "
+ << "metadata for " << ns << " at version " << shardVersion << endl;
+ } else {
+ errmsg = str::stream() << "failed to refresh shard metadata: " << status.reason();
return false;
}
+ }
- if ( cleanupResult == CleanupResult_Continue ) {
- result.append( stoppedAtKeyField(), stoppedAtKey );
- }
- else {
- dassert( cleanupResult == CleanupResult_Done );
- }
+ BSONObj stoppedAtKey;
+ CleanupResult cleanupResult = cleanupOrphanedData(
+ txn, NamespaceString(ns), startingFromKey, writeConcern, &stoppedAtKey, &errmsg);
- return true;
+ if (cleanupResult == CleanupResult_Error) {
+ return false;
}
- };
- BSONField<string> CleanupOrphanedCommand::nsField( "cleanupOrphaned" );
- BSONField<BSONObj> CleanupOrphanedCommand::startingFromKeyField( "startingFromKey" );
- BSONField<BSONObj> CleanupOrphanedCommand::stoppedAtKeyField( "stoppedAtKey" );
+ if (cleanupResult == CleanupResult_Continue) {
+ result.append(stoppedAtKeyField(), stoppedAtKey);
+ } else {
+ dassert(cleanupResult == CleanupResult_Done);
+ }
- MONGO_INITIALIZER(RegisterCleanupOrphanedCommand)(InitializerContext* context) {
- // Leaked intentionally: a Command registers itself when constructed.
- new CleanupOrphanedCommand();
- return Status::OK();
+ return true;
}
+};
+
+BSONField<string> CleanupOrphanedCommand::nsField("cleanupOrphaned");
+BSONField<BSONObj> CleanupOrphanedCommand::startingFromKeyField("startingFromKey");
+BSONField<BSONObj> CleanupOrphanedCommand::stoppedAtKeyField("stoppedAtKey");
-} // namespace mongo
+MONGO_INITIALIZER(RegisterCleanupOrphanedCommand)(InitializerContext* context) {
+ // Leaked intentionally: a Command registers itself when constructed.
+ new CleanupOrphanedCommand();
+ return Status::OK();
+}
+} // namespace mongo
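As the command's comment block explains, a caller sweeps the full collection range by re-issuing cleanupOrphaned with each returned stoppedAtKey until none comes back. A sketch of that driver loop follows; issueCleanupOrphaned is a hypothetical stand-in for sending the command through a driver, stubbed here so the example compiles and terminates.

    #include <iostream>
    #include <optional>
    #include <string>

    struct CleanupReply {
        bool ok;
        std::optional<std::string> stoppedAtKey;  // absent once no orphan ranges remain
        std::string errmsg;
    };

    // Hypothetical transport: would send { cleanupOrphaned: ns, startingFromKey: key }
    // to the shard primary via a real driver. Stubbed to terminate immediately.
    static CleanupReply issueCleanupOrphaned(const std::string& ns, const std::string& key) {
        (void)ns;
        (void)key;
        return {true, std::nullopt, ""};
    }

    int main() {
        std::string key = "{}";  // empty startingFromKey: begin at the shard key's minimum
        for (;;) {
            CleanupReply r = issueCleanupOrphaned("test.user", key);
            if (!r.ok) {
                std::cerr << "cleanupOrphaned failed: " << r.errmsg << "\n";
                return 1;
            }
            if (!r.stoppedAtKey)
                break;               // CleanupResult_Done: full range searched
            key = *r.stoppedAtKey;   // resume the scan after the range just cleaned
        }
        std::cout << "no orphan ranges remain\n";
    }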
diff --git a/src/mongo/db/commands/clone.cpp b/src/mongo/db/commands/clone.cpp
index f2fd0d8928f..0e6c7fbf1e7 100644
--- a/src/mongo/db/commands/clone.cpp
+++ b/src/mongo/db/commands/clone.cpp
@@ -39,97 +39,98 @@
namespace {
- using namespace mongo;
-
- using std::set;
- using std::string;
- using std::stringstream;
-
- /* Usage:
- mydb.$cmd.findOne( { clone: "fromhost" } );
- Note: doesn't work with authentication enabled, except as internal operation or for
- old-style users for backwards compatibility.
- */
- class CmdClone : public Command {
- public:
- CmdClone() : Command("clone") { }
-
- virtual bool slaveOk() const {
- return false;
- }
+using namespace mongo;
- virtual bool isWriteCommandForConfigServer() const { return true; }
+using std::set;
+using std::string;
+using std::stringstream;
- virtual void help( stringstream &help ) const {
- help << "clone this database from an instance of the db on another host\n";
- help << "{clone: \"host13\"[, slaveOk: <bool>]}";
+/* Usage:
+ mydb.$cmd.findOne( { clone: "fromhost" } );
+ Note: doesn't work with authentication enabled, except as internal operation or for
+ old-style users for backwards compatibility.
+*/
+class CmdClone : public Command {
+public:
+ CmdClone() : Command("clone") {}
+
+ virtual bool slaveOk() const {
+ return false;
+ }
+
+ virtual bool isWriteCommandForConfigServer() const {
+ return true;
+ }
+
+ virtual void help(stringstream& help) const {
+ help << "clone this database from an instance of the db on another host\n";
+ help << "{clone: \"host13\"[, slaveOk: <bool>]}";
+ }
+
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ ActionSet actions;
+ actions.addAction(ActionType::insert);
+ actions.addAction(ActionType::createIndex);
+ if (shouldBypassDocumentValidationForCommand(cmdObj)) {
+ actions.addAction(ActionType::bypassDocumentValidation);
}
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- ActionSet actions;
- actions.addAction(ActionType::insert);
- actions.addAction(ActionType::createIndex);
- if (shouldBypassDocumentValidationForCommand(cmdObj)) {
- actions.addAction(ActionType::bypassDocumentValidation);
- }
-
- if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forDatabaseName(dbname), actions)) {
- return Status(ErrorCodes::Unauthorized, "Unauthorized");
- }
- return Status::OK();
+ if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
+ ResourcePattern::forDatabaseName(dbname), actions)) {
+ return Status(ErrorCodes::Unauthorized, "Unauthorized");
+ }
+ return Status::OK();
+ }
+
+ virtual bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ boost::optional<DisableDocumentValidation> maybeDisableValidation;
+ if (shouldBypassDocumentValidationForCommand(cmdObj)) {
+ maybeDisableValidation.emplace(txn);
}
- virtual bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
-
- boost::optional<DisableDocumentValidation> maybeDisableValidation;
- if (shouldBypassDocumentValidationForCommand(cmdObj)) {
- maybeDisableValidation.emplace(txn);
- }
-
- string from = cmdObj.getStringField("clone");
- if ( from.empty() )
- return false;
+ string from = cmdObj.getStringField("clone");
+ if (from.empty())
+ return false;
- CloneOptions opts;
- opts.fromDB = dbname;
- opts.slaveOk = cmdObj["slaveOk"].trueValue();
+ CloneOptions opts;
+ opts.fromDB = dbname;
+ opts.slaveOk = cmdObj["slaveOk"].trueValue();
- // See if there are any collections we should ignore
- if( cmdObj["collsToIgnore"].type() == Array ){
- BSONObjIterator it( cmdObj["collsToIgnore"].Obj() );
+ // See if there are any collections we should ignore
+ if (cmdObj["collsToIgnore"].type() == Array) {
+ BSONObjIterator it(cmdObj["collsToIgnore"].Obj());
- while( it.more() ){
- BSONElement e = it.next();
- if( e.type() == String ){
- opts.collsToIgnore.insert( e.String() );
- }
+ while (it.more()) {
+ BSONElement e = it.next();
+ if (e.type() == String) {
+ opts.collsToIgnore.insert(e.String());
}
}
+ }
- set<string> clonedColls;
+ set<string> clonedColls;
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock dbXLock(txn->lockState(), dbname, MODE_X);
+ ScopedTransaction transaction(txn, MODE_IX);
+ Lock::DBLock dbXLock(txn->lockState(), dbname, MODE_X);
- Cloner cloner;
- Status status = cloner.copyDb(txn, dbname, from, opts, &clonedColls);
+ Cloner cloner;
+ Status status = cloner.copyDb(txn, dbname, from, opts, &clonedColls);
- BSONArrayBuilder barr;
- barr.append( clonedColls );
+ BSONArrayBuilder barr;
+ barr.append(clonedColls);
- result.append("clonedColls", barr.arr());
+ result.append("clonedColls", barr.arr());
- return appendCommandStatus(result, status);
- }
+ return appendCommandStatus(result, status);
+ }
- } cmdClone;
+} cmdClone;
-} // namespace
+} // namespace
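
The options parsed in CmdClone::run above (slaveOk, collsToIgnore) correspond to a command document like the following hedged sketch; "conn", the source host, and the collection names are illustrative. Per the code, the command is issued against the database being cloned (opts.fromDB is taken from the command's dbname), and collsToIgnore entries are inserted as plain strings.

    BSONObj res;
    conn.runCommand("mydb",
                    BSON("clone"
                         << "host13:27017"     // source host, as in the help text
                         << "slaveOk" << true  // allow reading from a source secondary
                         << "collsToIgnore"
                         << BSON_ARRAY("mydb.tmp1"
                                       << "mydb.tmp2")),  // string entries feed opts.collsToIgnore
                    res);
    // res["clonedColls"] reports which collections were actually copied.
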
diff --git a/src/mongo/db/commands/clone_collection.cpp b/src/mongo/db/commands/clone_collection.cpp
index 2ef62f8b090..f8bda90a8da 100644
--- a/src/mongo/db/commands/clone_collection.cpp
+++ b/src/mongo/db/commands/clone_collection.cpp
@@ -57,105 +57,106 @@
namespace mongo {
- using std::unique_ptr;
- using std::string;
- using std::stringstream;
- using std::endl;
-
- class CmdCloneCollection : public Command {
- public:
- CmdCloneCollection() : Command("cloneCollection") { }
-
- virtual bool slaveOk() const {
- return false;
+using std::unique_ptr;
+using std::string;
+using std::stringstream;
+using std::endl;
+
+class CmdCloneCollection : public Command {
+public:
+ CmdCloneCollection() : Command("cloneCollection") {}
+
+ virtual bool slaveOk() const {
+ return false;
+ }
+
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+
+ virtual std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const {
+ return parseNsFullyQualified(dbname, cmdObj);
+ }
+
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ std::string ns = parseNs(dbname, cmdObj);
+
+ ActionSet actions;
+ actions.addAction(ActionType::insert);
+ actions.addAction(ActionType::createIndex); // SERVER-11418
+ if (shouldBypassDocumentValidationForCommand(cmdObj)) {
+ actions.addAction(ActionType::bypassDocumentValidation);
}
- virtual bool isWriteCommandForConfigServer() const {
- return false;
+ if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
+ ResourcePattern::forExactNamespace(NamespaceString(ns)), actions)) {
+ return Status(ErrorCodes::Unauthorized, "Unauthorized");
}
-
- virtual std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const {
- return parseNsFullyQualified(dbname, cmdObj);
+ return Status::OK();
+ }
+
+ virtual void help(stringstream& help) const {
+ help << "{ cloneCollection: <collection>, from: <host> [,query: <query_filter>] "
+ "[,copyIndexes:<bool>] }"
+ "\nCopies a collection from one server to another. Do not use on a single server "
+ "as the destination "
+ "is placed at the same db.collection (namespace) as the source.\n";
+ }
+
+ virtual bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ boost::optional<DisableDocumentValidation> maybeDisableValidation;
+ if (shouldBypassDocumentValidationForCommand(cmdObj))
+ maybeDisableValidation.emplace(txn);
+
+ string fromhost = cmdObj.getStringField("from");
+ if (fromhost.empty()) {
+ errmsg = "missing 'from' parameter";
+ return false;
}
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- std::string ns = parseNs(dbname, cmdObj);
-
- ActionSet actions;
- actions.addAction(ActionType::insert);
- actions.addAction(ActionType::createIndex); // SERVER-11418
- if (shouldBypassDocumentValidationForCommand(cmdObj)) {
- actions.addAction(ActionType::bypassDocumentValidation);
- }
-
- if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forExactNamespace(NamespaceString(ns)), actions)) {
- return Status(ErrorCodes::Unauthorized, "Unauthorized");
+ {
+ HostAndPort h(fromhost);
+ if (repl::isSelf(h)) {
+ errmsg = "can't cloneCollection from self";
+ return false;
}
- return Status::OK();
}
- virtual void help( stringstream &help ) const {
- help << "{ cloneCollection: <collection>, from: <host> [,query: <query_filter>] [,copyIndexes:<bool>] }"
- "\nCopies a collection from one server to another. Do not use on a single server as the destination "
- "is placed at the same db.collection (namespace) as the source.\n"
- ;
+ string collection = parseNs(dbname, cmdObj);
+ Status allowedWriteStatus = userAllowedWriteNS(dbname, collection);
+ if (!allowedWriteStatus.isOK()) {
+ return appendCommandStatus(result, allowedWriteStatus);
}
- virtual bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
-
- boost::optional<DisableDocumentValidation> maybeDisableValidation;
- if (shouldBypassDocumentValidationForCommand(cmdObj))
- maybeDisableValidation.emplace(txn);
+ BSONObj query = cmdObj.getObjectField("query");
+ if (query.isEmpty())
+ query = BSONObj();
- string fromhost = cmdObj.getStringField("from");
- if ( fromhost.empty() ) {
- errmsg = "missing 'from' parameter";
- return false;
- }
-
- {
- HostAndPort h(fromhost);
- if (repl::isSelf(h)) {
- errmsg = "can't cloneCollection from self";
- return false;
- }
- }
-
- string collection = parseNs(dbname, cmdObj);
- Status allowedWriteStatus = userAllowedWriteNS(dbname, collection);
- if (!allowedWriteStatus.isOK()) {
- return appendCommandStatus(result, allowedWriteStatus);
- }
+ BSONElement copyIndexesSpec = cmdObj.getField("copyindexes");
+ bool copyIndexes = copyIndexesSpec.isBoolean() ? copyIndexesSpec.boolean() : true;
- BSONObj query = cmdObj.getObjectField("query");
- if ( query.isEmpty() )
- query = BSONObj();
+ log() << "cloneCollection. db:" << dbname << " collection:" << collection
+ << " from: " << fromhost << " query: " << query << " "
+ << (copyIndexes ? "" : ", not copying indexes") << endl;
- BSONElement copyIndexesSpec = cmdObj.getField("copyindexes");
- bool copyIndexes = copyIndexesSpec.isBoolean() ? copyIndexesSpec.boolean() : true;
-
- log() << "cloneCollection. db:" << dbname << " collection:" << collection << " from: " << fromhost
- << " query: " << query << " " << ( copyIndexes ? "" : ", not copying indexes" ) << endl;
-
- Cloner cloner;
- unique_ptr<DBClientConnection> myconn;
- myconn.reset( new DBClientConnection() );
- if ( ! myconn->connect( HostAndPort(fromhost) , errmsg ) )
- return false;
+ Cloner cloner;
+ unique_ptr<DBClientConnection> myconn;
+ myconn.reset(new DBClientConnection());
+ if (!myconn->connect(HostAndPort(fromhost), errmsg))
+ return false;
- cloner.setConnection( myconn.release() );
+ cloner.setConnection(myconn.release());
- return cloner.copyCollection(txn, collection, query, errmsg, true, false, copyIndexes);
- }
+ return cloner.copyCollection(txn, collection, query, errmsg, true, false, copyIndexes);
+ }
- } cmdCloneCollection;
+} cmdCloneCollection;
-} // namespace mongo
+} // namespace mongo
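
For reference, the wire shape documented in help() above, sketched from C++; "conn", the hosts, and the namespace are illustrative assumptions. Note that run() reads the index flag as "copyindexes" (all lowercase) even though the help text spells it copyIndexes.

    BSONObj res;
    conn.runCommand("test",
                    BSON("cloneCollection"
                         << "test.people"      // fully qualified, per parseNsFullyQualified
                         << "from"
                         << "otherhost:27017"  // rejected if it resolves to self (repl::isSelf)
                         << "query" << BSON("active" << true)  // optional filter
                         << "copyindexes" << true),            // spelled as run() reads it
                    res);
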
diff --git a/src/mongo/db/commands/collection_to_capped.cpp b/src/mongo/db/commands/collection_to_capped.cpp
index c9d3816a2b7..4f53833e975 100644
--- a/src/mongo/db/commands/collection_to_capped.cpp
+++ b/src/mongo/db/commands/collection_to_capped.cpp
@@ -46,117 +46,123 @@
namespace mongo {
- using std::unique_ptr;
- using std::string;
- using std::stringstream;
-
- class CmdCloneCollectionAsCapped : public Command {
- public:
- CmdCloneCollectionAsCapped() : Command( "cloneCollectionAsCapped" ) {}
- virtual bool slaveOk() const { return false; }
- virtual bool isWriteCommandForConfigServer() const { return true; }
- virtual void help( stringstream &help ) const {
- help << "{ cloneCollectionAsCapped:<fromName>, toCollection:<toName>, size:<sizeInBytes> }";
- }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet sourceActions;
- sourceActions.addAction(ActionType::find);
- out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), sourceActions));
-
- ActionSet targetActions;
- targetActions.addAction(ActionType::insert);
- targetActions.addAction(ActionType::createIndex);
- targetActions.addAction(ActionType::convertToCapped);
- std::string collection = cmdObj.getStringField("toCollection");
- uassert(16708, "bad 'toCollection' value", !collection.empty());
-
- out->push_back(Privilege(ResourcePattern::forExactNamespace(
- NamespaceString(dbname, collection)),
- targetActions));
- }
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& jsobj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- string from = jsobj.getStringField( "cloneCollectionAsCapped" );
- string to = jsobj.getStringField( "toCollection" );
- double size = jsobj.getField( "size" ).number();
- bool temp = jsobj.getField( "temp" ).trueValue();
-
- if ( from.empty() || to.empty() || size == 0 ) {
- errmsg = "invalid command spec";
- return false;
- }
-
- ScopedTransaction transaction(txn, MODE_IX);
- AutoGetDb autoDb(txn, dbname, MODE_X);
-
- NamespaceString nss(dbname, to);
- if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(nss)) {
- return appendCommandStatus(result, Status(ErrorCodes::NotMaster, str::stream()
- << "Not primary while cloning collection " << from << " to " << to
- << " (as capped)"));
- }
-
- Database* const db = autoDb.getDb();
- if (!db) {
- return appendCommandStatus(result,
- Status(ErrorCodes::DatabaseNotFound,
- str::stream() << "database " << dbname
- << " not found"));
- }
-
- Status status = cloneCollectionAsCapped(txn, db, from, to, size, temp);
- return appendCommandStatus( result, status );
- }
- } cmdCloneCollectionAsCapped;
-
- /* jan2010:
- Converts the given collection to a capped collection w/ the specified size.
- This command is not highly used, and is not currently supported in sharded
- environments.
- */
- class CmdConvertToCapped : public Command {
- public:
- CmdConvertToCapped() : Command( "convertToCapped" ) {}
- virtual bool slaveOk() const { return false; }
- virtual bool isWriteCommandForConfigServer() const { return true; }
- virtual void help( stringstream &help ) const {
- help << "{ convertToCapped:<fromCollectionName>, size:<sizeInBytes> }";
- }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::convertToCapped);
- out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
+using std::unique_ptr;
+using std::string;
+using std::stringstream;
+
+class CmdCloneCollectionAsCapped : public Command {
+public:
+ CmdCloneCollectionAsCapped() : Command("cloneCollectionAsCapped") {}
+ virtual bool slaveOk() const {
+ return false;
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return true;
+ }
+ virtual void help(stringstream& help) const {
+ help << "{ cloneCollectionAsCapped:<fromName>, toCollection:<toName>, size:<sizeInBytes> }";
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet sourceActions;
+ sourceActions.addAction(ActionType::find);
+ out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), sourceActions));
+
+ ActionSet targetActions;
+ targetActions.addAction(ActionType::insert);
+ targetActions.addAction(ActionType::createIndex);
+ targetActions.addAction(ActionType::convertToCapped);
+ std::string collection = cmdObj.getStringField("toCollection");
+ uassert(16708, "bad 'toCollection' value", !collection.empty());
+
+ out->push_back(
+ Privilege(ResourcePattern::forExactNamespace(NamespaceString(dbname, collection)),
+ targetActions));
+ }
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& jsobj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ string from = jsobj.getStringField("cloneCollectionAsCapped");
+ string to = jsobj.getStringField("toCollection");
+ double size = jsobj.getField("size").number();
+ bool temp = jsobj.getField("temp").trueValue();
+
+ if (from.empty() || to.empty() || size == 0) {
+ errmsg = "invalid command spec";
+ return false;
}
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& jsobj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
+ ScopedTransaction transaction(txn, MODE_IX);
+ AutoGetDb autoDb(txn, dbname, MODE_X);
- string shortSource = jsobj.getStringField( "convertToCapped" );
- double size = jsobj.getField( "size" ).number();
+ NamespaceString nss(dbname, to);
+ if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(nss)) {
+ return appendCommandStatus(result,
+ Status(ErrorCodes::NotMaster,
+ str::stream()
+ << "Not primary while cloning collection " << from
+ << " to " << to << " (as capped)"));
+ }
- if (shortSource.empty() || size == 0) {
- errmsg = "invalid command spec";
- return false;
- }
+ Database* const db = autoDb.getDb();
+ if (!db) {
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::DatabaseNotFound,
+ str::stream() << "database " << dbname << " not found"));
+ }
- return appendCommandStatus(result,
- convertToCapped(txn,
- NamespaceString(dbname, shortSource),
- size));
+ Status status = cloneCollectionAsCapped(txn, db, from, to, size, temp);
+ return appendCommandStatus(result, status);
+ }
+} cmdCloneCollectionAsCapped;
+
+/* jan2010:
+ Converts the given collection to a capped collection w/ the specified size.
+ This command is not highly used, and is not currently supported in sharded
+ environments.
+ */
+class CmdConvertToCapped : public Command {
+public:
+ CmdConvertToCapped() : Command("convertToCapped") {}
+ virtual bool slaveOk() const {
+ return false;
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return true;
+ }
+ virtual void help(stringstream& help) const {
+ help << "{ convertToCapped:<fromCollectionName>, size:<sizeInBytes> }";
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::convertToCapped);
+ out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
+ }
+
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& jsobj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ string shortSource = jsobj.getStringField("convertToCapped");
+ double size = jsobj.getField("size").number();
+
+ if (shortSource.empty() || size == 0) {
+ errmsg = "invalid command spec";
+ return false;
}
- } cmdConvertToCapped;
+ return appendCommandStatus(
+ result, convertToCapped(txn, NamespaceString(dbname, shortSource), size));
+ }
+} cmdConvertToCapped;
}
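
Both commands above validate the same basics: non-empty names and a nonzero size. A sketch of each call, assuming "conn" points at the primary and using an illustrative collection and a 10 MB size:

    BSONObj res;
    // Copy "events" into a new capped collection "events_capped" in the same db.
    conn.runCommand("test",
                    BSON("cloneCollectionAsCapped"
                         << "events"
                         << "toCollection"
                         << "events_capped"
                         << "size" << 10 * 1024 * 1024),
                    res);
    // Convert "events" in place; per the jan2010 comment, not supported in sharded environments.
    conn.runCommand("test",
                    BSON("convertToCapped"
                         << "events"
                         << "size" << 10 * 1024 * 1024),
                    res);
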
diff --git a/src/mongo/db/commands/compact.cpp b/src/mongo/db/commands/compact.cpp
index e0c935a3f19..a71357cb53a 100644
--- a/src/mongo/db/commands/compact.cpp
+++ b/src/mongo/db/commands/compact.cpp
@@ -51,126 +51,134 @@
namespace mongo {
- using std::string;
- using std::stringstream;
-
- class CompactCmd : public Command {
- public:
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual bool adminOnly() const { return false; }
- virtual bool slaveOk() const { return true; }
- virtual bool maintenanceMode() const { return true; }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::compact);
- out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
- }
- virtual void help( stringstream& help ) const {
- help << "compact collection\n"
- "warning: this operation locks the database and is slow. you can cancel with killOp()\n"
+using std::string;
+using std::stringstream;
+
+class CompactCmd : public Command {
+public:
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual bool adminOnly() const {
+ return false;
+ }
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual bool maintenanceMode() const {
+ return true;
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::compact);
+ out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
+ }
+ virtual void help(stringstream& help) const {
+ help << "compact collection\n"
+ "warning: this operation locks the database and is slow. you can cancel with "
+ "killOp()\n"
"{ compact : <collection_name>, [force:<bool>], [validate:<bool>],\n"
" [paddingFactor:<num>], [paddingBytes:<num>] }\n"
" force - allows to run on a replica set primary\n"
- " validate - check records are noncorrupt before adding to newly compacting extents. slower but safer (defaults to true in this version)\n";
+ " validate - check records are noncorrupt before adding to newly compacting "
+ "extents. slower but safer (defaults to true in this version)\n";
+ }
+ CompactCmd() : Command("compact") {}
+
+ virtual bool run(OperationContext* txn,
+ const string& db,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ const std::string nsToCompact = parseNsCollectionRequired(db, cmdObj);
+
+ repl::ReplicationCoordinator* replCoord = repl::getGlobalReplicationCoordinator();
+ if (replCoord->getMemberState().primary() && !cmdObj["force"].trueValue()) {
+ errmsg =
+ "will not run compact on an active replica set primary as this is a slow blocking "
+ "operation. use force:true to force";
+ return false;
}
- CompactCmd() : Command("compact") { }
-
- virtual bool run(OperationContext* txn,
- const string& db,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- const std::string nsToCompact = parseNsCollectionRequired(db, cmdObj);
-
- repl::ReplicationCoordinator* replCoord = repl::getGlobalReplicationCoordinator();
- if (replCoord->getMemberState().primary() && !cmdObj["force"].trueValue()) {
- errmsg = "will not run compact on an active replica set primary as this is a slow blocking operation. use force:true to force";
- return false;
- }
- NamespaceString ns(nsToCompact);
- if ( !ns.isNormal() ) {
- errmsg = "bad namespace name";
- return false;
- }
+ NamespaceString ns(nsToCompact);
+ if (!ns.isNormal()) {
+ errmsg = "bad namespace name";
+ return false;
+ }
- if ( ns.isSystem() ) {
- // items in system.* cannot be moved as there might be pointers to them
- // i.e. system.indexes entries are pointed to from NamespaceDetails
- errmsg = "can't compact a system namespace";
- return false;
- }
+ if (ns.isSystem()) {
+ // items in system.* cannot be moved as there might be pointers to them
+ // i.e. system.indexes entries are pointed to from NamespaceDetails
+ errmsg = "can't compact a system namespace";
+ return false;
+ }
- CompactOptions compactOptions;
+ CompactOptions compactOptions;
- if ( cmdObj["preservePadding"].trueValue() ) {
- compactOptions.paddingMode = CompactOptions::PRESERVE;
- if ( cmdObj.hasElement( "paddingFactor" ) ||
- cmdObj.hasElement( "paddingBytes" ) ) {
- errmsg = "cannot mix preservePadding and paddingFactor|paddingBytes";
+ if (cmdObj["preservePadding"].trueValue()) {
+ compactOptions.paddingMode = CompactOptions::PRESERVE;
+ if (cmdObj.hasElement("paddingFactor") || cmdObj.hasElement("paddingBytes")) {
+ errmsg = "cannot mix preservePadding and paddingFactor|paddingBytes";
+ return false;
+ }
+ } else if (cmdObj.hasElement("paddingFactor") || cmdObj.hasElement("paddingBytes")) {
+ compactOptions.paddingMode = CompactOptions::MANUAL;
+ if (cmdObj.hasElement("paddingFactor")) {
+ compactOptions.paddingFactor = cmdObj["paddingFactor"].Number();
+ if (compactOptions.paddingFactor < 1 || compactOptions.paddingFactor > 4) {
+ errmsg = "invalid padding factor";
return false;
}
}
- else if ( cmdObj.hasElement( "paddingFactor" ) || cmdObj.hasElement( "paddingBytes" ) ) {
- compactOptions.paddingMode = CompactOptions::MANUAL;
- if ( cmdObj.hasElement("paddingFactor") ) {
- compactOptions.paddingFactor = cmdObj["paddingFactor"].Number();
- if ( compactOptions.paddingFactor < 1 ||
- compactOptions.paddingFactor > 4 ){
- errmsg = "invalid padding factor";
- return false;
- }
- }
- if ( cmdObj.hasElement("paddingBytes") ) {
- compactOptions.paddingBytes = cmdObj["paddingBytes"].numberInt();
- if ( compactOptions.paddingBytes < 0 ||
- compactOptions.paddingBytes > ( 1024 * 1024 ) ) {
- errmsg = "invalid padding bytes";
- return false;
- }
+ if (cmdObj.hasElement("paddingBytes")) {
+ compactOptions.paddingBytes = cmdObj["paddingBytes"].numberInt();
+ if (compactOptions.paddingBytes < 0 ||
+ compactOptions.paddingBytes > (1024 * 1024)) {
+ errmsg = "invalid padding bytes";
+ return false;
}
}
+ }
- if ( cmdObj.hasElement("validate") )
- compactOptions.validateDocuments = cmdObj["validate"].trueValue();
-
+ if (cmdObj.hasElement("validate"))
+ compactOptions.validateDocuments = cmdObj["validate"].trueValue();
- ScopedTransaction transaction(txn, MODE_IX);
- AutoGetDb autoDb(txn, db, MODE_X);
- Database* const collDB = autoDb.getDb();
- Collection* collection = collDB ? collDB->getCollection(ns) : NULL;
- // If db/collection does not exist, short circuit and return.
- if ( !collDB || !collection ) {
- errmsg = "namespace does not exist";
- return false;
- }
+ ScopedTransaction transaction(txn, MODE_IX);
+ AutoGetDb autoDb(txn, db, MODE_X);
+ Database* const collDB = autoDb.getDb();
+ Collection* collection = collDB ? collDB->getCollection(ns) : NULL;
- OldClientContext ctx(txn, ns);
- BackgroundOperation::assertNoBgOpInProgForNs(ns.ns());
+ // If db/collection does not exist, short circuit and return.
+ if (!collDB || !collection) {
+ errmsg = "namespace does not exist";
+ return false;
+ }
- if ( collection->isCapped() ) {
- errmsg = "cannot compact a capped collection";
- return false;
- }
+ OldClientContext ctx(txn, ns);
+ BackgroundOperation::assertNoBgOpInProgForNs(ns.ns());
- log() << "compact " << ns << " begin, options: " << compactOptions.toString();
+ if (collection->isCapped()) {
+ errmsg = "cannot compact a capped collection";
+ return false;
+ }
- StatusWith<CompactStats> status = collection->compact( txn, &compactOptions );
- if ( !status.isOK() )
- return appendCommandStatus( result, status.getStatus() );
+ log() << "compact " << ns << " begin, options: " << compactOptions.toString();
- if ( status.getValue().corruptDocuments > 0 )
- result.append("invalidObjects", status.getValue().corruptDocuments );
+ StatusWith<CompactStats> status = collection->compact(txn, &compactOptions);
+ if (!status.isOK())
+ return appendCommandStatus(result, status.getStatus());
- log() << "compact " << ns << " end";
+ if (status.getValue().corruptDocuments > 0)
+ result.append("invalidObjects", status.getValue().corruptDocuments);
- return true;
- }
- };
- static CompactCmd compactCmd;
+ log() << "compact " << ns << " end";
+ return true;
+ }
+};
+static CompactCmd compactCmd;
}
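
CompactCmd::run above enforces that preservePadding excludes the manual padding knobs, bounds paddingFactor to [1, 4] and paddingBytes to [0, 1 MB], and refuses to run on an active primary without force:true. A sketch of one valid invocation under those rules; "conn" and the collection name are assumptions:

    BSONObj res;
    conn.runCommand("test",
                    BSON("compact"
                         << "events"
                         << "force" << true         // required on an active replica set primary
                         << "paddingFactor" << 1.1  // MANUAL padding mode; must stay in [1, 4]
                         << "validate" << true),    // already the default in this version
                    res);
    // Combining preservePadding with either padding knob fails with
    // "cannot mix preservePadding and paddingFactor|paddingBytes".
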
diff --git a/src/mongo/db/commands/connection_status.cpp b/src/mongo/db/commands/connection_status.cpp
index 06a4367c9ed..843b8b1728a 100644
--- a/src/mongo/db/commands/connection_status.cpp
+++ b/src/mongo/db/commands/connection_status.cpp
@@ -35,100 +35,99 @@
namespace mongo {
- using std::string;
- using std::stringstream;
-
- class CmdConnectionStatus : public Command {
- public:
- CmdConnectionStatus() : Command("connectionStatus") {}
- virtual bool slaveOk() const { return true; }
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {} // No auth required
-
- void help(stringstream& h) const {
- h << "Returns connection-specific information such as logged-in users and their roles";
+using std::string;
+using std::stringstream;
+
+class CmdConnectionStatus : public Command {
+public:
+ CmdConnectionStatus() : Command("connectionStatus") {}
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {} // No auth required
+
+ void help(stringstream& h) const {
+ h << "Returns connection-specific information such as logged-in users and their roles";
+ }
+
+ bool run(OperationContext* txn,
+ const string&,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ AuthorizationSession* authSession = AuthorizationSession::get(ClientBasic::getCurrent());
+
+ bool showPrivileges;
+ Status status =
+ bsonExtractBooleanFieldWithDefault(cmdObj, "showPrivileges", false, &showPrivileges);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
}
- bool run(OperationContext* txn, const string&, BSONObj& cmdObj, int, string& errmsg,
- BSONObjBuilder& result) {
- AuthorizationSession* authSession =
- AuthorizationSession::get(ClientBasic::getCurrent());
-
- bool showPrivileges;
- Status status = bsonExtractBooleanFieldWithDefault(cmdObj,
- "showPrivileges",
- false,
- &showPrivileges);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+ BSONObjBuilder authInfo(result.subobjStart("authInfo"));
+ {
+ BSONArrayBuilder authenticatedUsers(authInfo.subarrayStart("authenticatedUsers"));
+ UserNameIterator nameIter = authSession->getAuthenticatedUserNames();
- BSONObjBuilder authInfo(result.subobjStart("authInfo"));
- {
- BSONArrayBuilder authenticatedUsers(authInfo.subarrayStart("authenticatedUsers"));
- UserNameIterator nameIter = authSession->getAuthenticatedUserNames();
-
- for ( ; nameIter.more(); nameIter.next()) {
- BSONObjBuilder userInfoBuilder(authenticatedUsers.subobjStart());
- userInfoBuilder.append(AuthorizationManager::USER_NAME_FIELD_NAME,
- nameIter->getUser());
- userInfoBuilder.append(AuthorizationManager::USER_DB_FIELD_NAME,
- nameIter->getDB());
- }
+ for (; nameIter.more(); nameIter.next()) {
+ BSONObjBuilder userInfoBuilder(authenticatedUsers.subobjStart());
+ userInfoBuilder.append(AuthorizationManager::USER_NAME_FIELD_NAME,
+ nameIter->getUser());
+ userInfoBuilder.append(AuthorizationManager::USER_DB_FIELD_NAME, nameIter->getDB());
}
- {
- BSONArrayBuilder authenticatedRoles(
- authInfo.subarrayStart("authenticatedUserRoles"));
- RoleNameIterator roleIter = authSession->getAuthenticatedRoleNames();
-
- for ( ; roleIter.more(); roleIter.next()) {
- BSONObjBuilder roleInfoBuilder(authenticatedRoles.subobjStart());
- roleInfoBuilder.append(AuthorizationManager::ROLE_NAME_FIELD_NAME,
- roleIter->getRole());
- roleInfoBuilder.append(AuthorizationManager::ROLE_DB_FIELD_NAME,
- roleIter->getDB());
- }
+ }
+ {
+ BSONArrayBuilder authenticatedRoles(authInfo.subarrayStart("authenticatedUserRoles"));
+ RoleNameIterator roleIter = authSession->getAuthenticatedRoleNames();
+
+ for (; roleIter.more(); roleIter.next()) {
+ BSONObjBuilder roleInfoBuilder(authenticatedRoles.subobjStart());
+ roleInfoBuilder.append(AuthorizationManager::ROLE_NAME_FIELD_NAME,
+ roleIter->getRole());
+ roleInfoBuilder.append(AuthorizationManager::ROLE_DB_FIELD_NAME, roleIter->getDB());
}
- if (showPrivileges) {
- BSONArrayBuilder authenticatedPrivileges(
- authInfo.subarrayStart("authenticatedUserPrivileges"));
-
- // Create a unified map of resources to privileges, to avoid duplicate
- // entries in the connection status output.
- User::ResourcePrivilegeMap unifiedResourcePrivilegeMap;
- UserNameIterator nameIter = authSession->getAuthenticatedUserNames();
-
- for ( ; nameIter.more(); nameIter.next()) {
- User* authUser = authSession->lookupUser(*nameIter);
- const User::ResourcePrivilegeMap& resourcePrivilegeMap =
- authUser->getPrivileges();
- for (User::ResourcePrivilegeMap::const_iterator it =
- resourcePrivilegeMap.begin();
- it != resourcePrivilegeMap.end();
- ++it) {
- if (unifiedResourcePrivilegeMap.find(it->first) ==
- unifiedResourcePrivilegeMap.end()) {
- unifiedResourcePrivilegeMap[it->first] = it->second;
- } else {
- unifiedResourcePrivilegeMap[it->first].addActions(
- it->second.getActions());
- }
+ }
+ if (showPrivileges) {
+ BSONArrayBuilder authenticatedPrivileges(
+ authInfo.subarrayStart("authenticatedUserPrivileges"));
+
+ // Create a unified map of resources to privileges, to avoid duplicate
+ // entries in the connection status output.
+ User::ResourcePrivilegeMap unifiedResourcePrivilegeMap;
+ UserNameIterator nameIter = authSession->getAuthenticatedUserNames();
+
+ for (; nameIter.more(); nameIter.next()) {
+ User* authUser = authSession->lookupUser(*nameIter);
+ const User::ResourcePrivilegeMap& resourcePrivilegeMap = authUser->getPrivileges();
+ for (User::ResourcePrivilegeMap::const_iterator it = resourcePrivilegeMap.begin();
+ it != resourcePrivilegeMap.end();
+ ++it) {
+ if (unifiedResourcePrivilegeMap.find(it->first) ==
+ unifiedResourcePrivilegeMap.end()) {
+ unifiedResourcePrivilegeMap[it->first] = it->second;
+ } else {
+ unifiedResourcePrivilegeMap[it->first].addActions(it->second.getActions());
}
}
+ }
- for (User::ResourcePrivilegeMap::const_iterator it =
- unifiedResourcePrivilegeMap.begin();
- it != unifiedResourcePrivilegeMap.end();
- ++it) {
- authenticatedPrivileges << it->second.toBSON();
- }
+ for (User::ResourcePrivilegeMap::const_iterator it =
+ unifiedResourcePrivilegeMap.begin();
+ it != unifiedResourcePrivilegeMap.end();
+ ++it) {
+ authenticatedPrivileges << it->second.toBSON();
}
+ }
- authInfo.doneFast();
+ authInfo.doneFast();
- return true;
- }
- } cmdConnectionStatus;
+ return true;
+ }
+} cmdConnectionStatus;
}
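
The builders above yield a reply shaped as sketched below; the call assumes an authenticated "conn", and the user and role values are illustrative:

    BSONObj res;
    conn.runCommand("admin", BSON("connectionStatus" << 1 << "showPrivileges" << true), res);
    // Illustrative reply shape:
    // { authInfo: { authenticatedUsers:     [ { user: "alice", db: "admin" } ],
    //               authenticatedUserRoles: [ { role: "root",  db: "admin" } ],
    //               authenticatedUserPrivileges: [ ... ] },  // only when showPrivileges is true
    //   ok: 1 }
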
diff --git a/src/mongo/db/commands/copydb.cpp b/src/mongo/db/commands/copydb.cpp
index 328c24111f8..d41c3f50657 100644
--- a/src/mongo/db/commands/copydb.cpp
+++ b/src/mongo/db/commands/copydb.cpp
@@ -43,190 +43,190 @@
namespace {
- using namespace mongo;
-
- using std::string;
- using std::stringstream;
-
- /* Usage:
- * admindb.$cmd.findOne( { copydb: 1, fromhost: <connection string>, fromdb: <db>,
- * todb: <db>[, username: <username>, nonce: <nonce>, key: <key>] } );
- *
- * The "copydb" command is used to copy a database. Note that this is a very broad definition.
- * This means that the "copydb" command can be used in the following ways:
- *
- * 1. To copy a database within a single node
- * 2. To copy a database within a sharded cluster, possibly to another shard
- * 3. To copy a database from one cluster to another
- *
- * Note that in all cases both the target and source database must be unsharded.
- *
- * The "copydb" command gets sent by the client or the mongos to the destination of the copy
- * operation. The node, cluster, or shard that receives the "copydb" command must then query
- * the source of the database to be copied for all the contents and metadata of the database.
- *
- *
- *
- * When used with auth, there are two different considerations.
- *
- * The first is authentication with the target. The only entity that needs to authenticate with
- * the target node is the client, so authentication works there the same as it would with any
- * other command.
- *
- * The second is the authentication of the target with the source, which is needed because the
- * target must query the source directly for the contents of the database. To do this, the
- * client must use the "copydbgetnonce" command, in which the target will get a nonce from the
- * source and send it back to the client. The client can then hash its password with the nonce,
- * send it to the target when it runs the "copydb" command, which can then use that information
- * to authenticate with the source.
- *
- * NOTE: mongos doesn't know how to call or handle the "copydbgetnonce" command. See
- * SERVER-6427.
- *
- * NOTE: Since internal cluster auth works differently, "copydb" currently doesn't work between
- * shards in a cluster when auth is enabled. See SERVER-13080.
- */
- class CmdCopyDb : public Command {
- public:
- CmdCopyDb() : Command("copydb") { }
-
- virtual bool adminOnly() const {
- return true;
+using namespace mongo;
+
+using std::string;
+using std::stringstream;
+
+/* Usage:
+ * admindb.$cmd.findOne( { copydb: 1, fromhost: <connection string>, fromdb: <db>,
+ * todb: <db>[, username: <username>, nonce: <nonce>, key: <key>] } );
+ *
+ * The "copydb" command is used to copy a database. Note that this is a very broad definition.
+ * This means that the "copydb" command can be used in the following ways:
+ *
+ * 1. To copy a database within a single node
+ * 2. To copy a database within a sharded cluster, possibly to another shard
+ * 3. To copy a database from one cluster to another
+ *
+ * Note that in all cases both the target and source database must be unsharded.
+ *
+ * The "copydb" command gets sent by the client or the mongos to the destination of the copy
+ * operation. The node, cluster, or shard that receives the "copydb" command must then query
+ * the source of the database to be copied for all the contents and metadata of the database.
+ *
+ *
+ *
+ * When used with auth, there are two different considerations.
+ *
+ * The first is authentication with the target. The only entity that needs to authenticate with
+ * the target node is the client, so authentication works there the same as it would with any
+ * other command.
+ *
+ * The second is the authentication of the target with the source, which is needed because the
+ * target must query the source directly for the contents of the database. To do this, the
+ * client must use the "copydbgetnonce" command, in which the target will get a nonce from the
+ * source and send it back to the client. The client can then hash its password with the nonce,
+ * send it to the target when it runs the "copydb" command, which can then use that information
+ * to authenticate with the source.
+ *
+ * NOTE: mongos doesn't know how to call or handle the "copydbgetnonce" command. See
+ * SERVER-6427.
+ *
+ * NOTE: Since internal cluster auth works differently, "copydb" currently doesn't work between
+ * shards in a cluster when auth is enabled. See SERVER-13080.
+ */
+class CmdCopyDb : public Command {
+public:
+ CmdCopyDb() : Command("copydb") {}
+
+ virtual bool adminOnly() const {
+ return true;
+ }
+
+ virtual bool slaveOk() const {
+ return false;
+ }
+
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ return copydb::checkAuthForCopydbCommand(client, dbname, cmdObj);
+ }
+
+ virtual void help(stringstream& help) const {
+ help << "copy a database from another host to this host\n";
+ help << "usage: {copydb: 1, fromhost: <connection string>, fromdb: <db>, todb: <db>"
+ << "[, slaveOk: <bool>, username: <username>, nonce: <nonce>, key: <key>]}";
+ }
+
+ virtual bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ boost::optional<DisableDocumentValidation> maybeDisableValidation;
+ if (shouldBypassDocumentValidationForCommand(cmdObj))
+ maybeDisableValidation.emplace(txn);
+
+ string fromhost = cmdObj.getStringField("fromhost");
+ bool fromSelf = fromhost.empty();
+ if (fromSelf) {
+ /* copy from self */
+ stringstream ss;
+ ss << "localhost:" << serverGlobalParams.port;
+ fromhost = ss.str();
}
- virtual bool slaveOk() const {
+ CloneOptions cloneOptions;
+ cloneOptions.fromDB = cmdObj.getStringField("fromdb");
+ cloneOptions.slaveOk = cmdObj["slaveOk"].trueValue();
+ cloneOptions.useReplAuth = false;
+ cloneOptions.snapshot = true;
+ cloneOptions.mayYield = true;
+ cloneOptions.mayBeInterrupted = false;
+
+ string todb = cmdObj.getStringField("todb");
+ if (fromhost.empty() || todb.empty() || cloneOptions.fromDB.empty()) {
+ errmsg =
+ "params missing - {copydb: 1, fromhost: <connection string>, "
+ "fromdb: <db>, todb: <db>}";
return false;
}
- virtual bool isWriteCommandForConfigServer() const { return false; }
-
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- return copydb::checkAuthForCopydbCommand(client, dbname, cmdObj);
+ if (!NamespaceString::validDBName(todb)) {
+ errmsg = "invalid todb name: " + todb;
+ return false;
}
- virtual void help( stringstream &help ) const {
- help << "copy a database from another host to this host\n";
- help << "usage: {copydb: 1, fromhost: <connection string>, fromdb: <db>, todb: <db>"
- << "[, slaveOk: <bool>, username: <username>, nonce: <nonce>, key: <key>]}";
- }
+ Cloner cloner;
- virtual bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
-
- boost::optional<DisableDocumentValidation> maybeDisableValidation;
- if (shouldBypassDocumentValidationForCommand(cmdObj))
- maybeDisableValidation.emplace(txn);
-
- string fromhost = cmdObj.getStringField("fromhost");
- bool fromSelf = fromhost.empty();
- if ( fromSelf ) {
- /* copy from self */
- stringstream ss;
- ss << "localhost:" << serverGlobalParams.port;
- fromhost = ss.str();
- }
+ // Get MONGODB-CR parameters
+ string username = cmdObj.getStringField("username");
+ string nonce = cmdObj.getStringField("nonce");
+ string key = cmdObj.getStringField("key");
- CloneOptions cloneOptions;
- cloneOptions.fromDB = cmdObj.getStringField("fromdb");
- cloneOptions.slaveOk = cmdObj["slaveOk"].trueValue();
- cloneOptions.useReplAuth = false;
- cloneOptions.snapshot = true;
- cloneOptions.mayYield = true;
- cloneOptions.mayBeInterrupted = false;
-
- string todb = cmdObj.getStringField("todb");
- if ( fromhost.empty() || todb.empty() || cloneOptions.fromDB.empty() ) {
- errmsg = "params missing - {copydb: 1, fromhost: <connection string>, "
- "fromdb: <db>, todb: <db>}";
- return false;
- }
+ auto& authConn = CopyDbAuthConnection::forClient(txn->getClient());
- if ( !NamespaceString::validDBName( todb ) ) {
- errmsg = "invalid todb name: " + todb;
- return false;
- }
-
- Cloner cloner;
-
- // Get MONGODB-CR parameters
- string username = cmdObj.getStringField( "username" );
- string nonce = cmdObj.getStringField( "nonce" );
- string key = cmdObj.getStringField( "key" );
-
- auto& authConn = CopyDbAuthConnection::forClient(txn->getClient());
-
- if ( !username.empty() && !nonce.empty() && !key.empty() ) {
- uassert( 13008, "must call copydbgetnonce first", authConn.get() );
- BSONObj ret;
- {
- if ( !authConn->runCommand( cloneOptions.fromDB,
- BSON( "authenticate" << 1 << "user" << username
- << "nonce" << nonce << "key" << key ), ret ) ) {
- errmsg = "unable to login " + ret.toString();
- authConn.reset();
- return false;
- }
- }
- cloner.setConnection( authConn.release() );
- }
- else if (cmdObj.hasField(saslCommandConversationIdFieldName) &&
- cmdObj.hasField(saslCommandPayloadFieldName)) {
- uassert( 25487, "must call copydbsaslstart first", authConn.get() );
- BSONObj ret;
- if ( !authConn->runCommand( cloneOptions.fromDB,
- BSON( "saslContinue" << 1 <<
- cmdObj[saslCommandConversationIdFieldName] <<
- cmdObj[saslCommandPayloadFieldName] ),
- ret ) ) {
+ if (!username.empty() && !nonce.empty() && !key.empty()) {
+ uassert(13008, "must call copydbgetnonce first", authConn.get());
+ BSONObj ret;
+ {
+ if (!authConn->runCommand(cloneOptions.fromDB,
+ BSON("authenticate" << 1 << "user" << username << "nonce"
+ << nonce << "key" << key),
+ ret)) {
errmsg = "unable to login " + ret.toString();
authConn.reset();
return false;
}
-
- if (!ret["done"].Bool()) {
- result.appendElements( ret );
- return true;
- }
-
- result.append("done", true);
- cloner.setConnection( authConn.release() );
}
- else if (!fromSelf) {
- // If fromSelf, leave the cloner's conn empty; it will use a DBDirectClient instead.
- const ConnectionString cs(uassertStatusOK(ConnectionString::parse(fromhost)));
+ cloner.setConnection(authConn.release());
+ } else if (cmdObj.hasField(saslCommandConversationIdFieldName) &&
+ cmdObj.hasField(saslCommandPayloadFieldName)) {
+ uassert(25487, "must call copydbsaslstart first", authConn.get());
+ BSONObj ret;
+ if (!authConn->runCommand(cloneOptions.fromDB,
+ BSON("saslContinue"
+ << 1 << cmdObj[saslCommandConversationIdFieldName]
+ << cmdObj[saslCommandPayloadFieldName]),
+ ret)) {
+ errmsg = "unable to login " + ret.toString();
+ authConn.reset();
+ return false;
+ }
- DBClientBase* conn = cs.connect(errmsg);
- if (!conn) {
- return false;
- }
- cloner.setConnection(conn);
+ if (!ret["done"].Bool()) {
+ result.appendElements(ret);
+ return true;
}
- // Either we didn't need the authConn (if we even had one), or we already moved it
- // into the cloner, so just make sure we don't keep it around if we don't need it.
- authConn.reset();
+ result.append("done", true);
+ cloner.setConnection(authConn.release());
+ } else if (!fromSelf) {
+ // If fromSelf, leave the cloner's conn empty; it will use a DBDirectClient instead.
+ const ConnectionString cs(uassertStatusOK(ConnectionString::parse(fromhost)));
- if (fromSelf) {
- // SERVER-4328 todo lock just the two db's not everything for the fromself case
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite lk(txn->lockState());
- uassertStatusOK(cloner.copyDb(txn, todb, fromhost, cloneOptions, NULL));
- }
- else {
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock lk(txn->lockState(), todb, MODE_X);
- uassertStatusOK(cloner.copyDb(txn, todb, fromhost, cloneOptions, NULL));
+ DBClientBase* conn = cs.connect(errmsg);
+ if (!conn) {
+ return false;
}
+ cloner.setConnection(conn);
+ }
- return true;
+ // Either we didn't need the authConn (if we even had one), or we already moved it
+ // into the cloner, so just make sure we don't keep it around if we don't need it.
+ authConn.reset();
+
+ if (fromSelf) {
+ // SERVER-4328 todo lock just the two db's not everything for the fromself case
+ ScopedTransaction transaction(txn, MODE_X);
+ Lock::GlobalWrite lk(txn->lockState());
+ uassertStatusOK(cloner.copyDb(txn, todb, fromhost, cloneOptions, NULL));
+ } else {
+ ScopedTransaction transaction(txn, MODE_IX);
+ Lock::DBLock lk(txn->lockState(), todb, MODE_X);
+ uassertStatusOK(cloner.copyDb(txn, todb, fromhost, cloneOptions, NULL));
}
- } cmdCopyDB;
+ return true;
+ }
+
+} cmdCopyDB;
-} // namespace
+} // namespace
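
The MONGODB-CR branch above expects the client to have primed the per-client auth connection with copydbgetnonce before sending username/nonce/key along with copydb, as the block comment describes. A sketch of that three-step flow from the client's side; "conn", the hosts, the credentials, and computeMongoCRKey (a stand-in for the MONGODB-CR key derivation, which is not shown in this patch) are all assumptions:

    std::string username = "alice";  // illustrative
    BSONObj nonceRes;
    conn.runCommand("admin",
                    BSON("copydbgetnonce" << 1 << "fromhost"
                                          << "src:27017"),
                    nonceRes);
    std::string nonce = nonceRes.getStringField("nonce");

    // Placeholder: hash the password with the nonce per MONGODB-CR (helper assumed, not real).
    std::string key = computeMongoCRKey(username, "password", nonce);

    BSONObj res;
    conn.runCommand("admin",
                    BSON("copydb" << 1 << "fromhost"
                                  << "src:27017"
                                  << "fromdb"
                                  << "srcdb"
                                  << "todb"
                                  << "dstdb"
                                  << "username" << username << "nonce" << nonce << "key" << key),
                    res);
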
diff --git a/src/mongo/db/commands/copydb.h b/src/mongo/db/commands/copydb.h
index f7b2adfbe6d..3da70ccd01a 100644
--- a/src/mongo/db/commands/copydb.h
+++ b/src/mongo/db/commands/copydb.h
@@ -36,15 +36,13 @@
namespace mongo {
- class ClientBasic;
+class ClientBasic;
namespace copydb {
- Status checkAuthForCopydbCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj);
-
-} // namespace copydb
-} // namespace mongo
-
+Status checkAuthForCopydbCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj);
+} // namespace copydb
+} // namespace mongo
diff --git a/src/mongo/db/commands/copydb_common.cpp b/src/mongo/db/commands/copydb_common.cpp
index b72a91a9310..5f033aede73 100644
--- a/src/mongo/db/commands/copydb_common.cpp
+++ b/src/mongo/db/commands/copydb_common.cpp
@@ -43,63 +43,63 @@
namespace mongo {
namespace copydb {
- Status checkAuthForCopydbCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- bool fromSelf = StringData(cmdObj.getStringField("fromhost")).empty();
- StringData fromdb = cmdObj.getStringField("fromdb");
- StringData todb = cmdObj.getStringField("todb");
+Status checkAuthForCopydbCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ bool fromSelf = StringData(cmdObj.getStringField("fromhost")).empty();
+ StringData fromdb = cmdObj.getStringField("fromdb");
+ StringData todb = cmdObj.getStringField("todb");
- // get system collections
- std::vector<std::string> legalClientSystemCollections;
- legalClientSystemCollections.push_back("system.js");
- if (fromdb == "admin") {
- legalClientSystemCollections.push_back("system.users");
- legalClientSystemCollections.push_back("system.roles");
- legalClientSystemCollections.push_back("system.version");
- } else if (fromdb == "local") { // TODO(spencer): shouldn't be possible. See SERVER-11383
- legalClientSystemCollections.push_back("system.replset");
- }
+ // get system collections
+ std::vector<std::string> legalClientSystemCollections;
+ legalClientSystemCollections.push_back("system.js");
+ if (fromdb == "admin") {
+ legalClientSystemCollections.push_back("system.users");
+ legalClientSystemCollections.push_back("system.roles");
+ legalClientSystemCollections.push_back("system.version");
+ } else if (fromdb == "local") { // TODO(spencer): shouldn't be possible. See SERVER-11383
+ legalClientSystemCollections.push_back("system.replset");
+ }
- // Check authorization on destination db
- ActionSet actions;
- actions.addAction(ActionType::insert);
- actions.addAction(ActionType::createIndex);
- if (shouldBypassDocumentValidationForCommand(cmdObj)) {
- actions.addAction(ActionType::bypassDocumentValidation);
- }
+ // Check authorization on destination db
+ ActionSet actions;
+ actions.addAction(ActionType::insert);
+ actions.addAction(ActionType::createIndex);
+ if (shouldBypassDocumentValidationForCommand(cmdObj)) {
+ actions.addAction(ActionType::bypassDocumentValidation);
+ }
- if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forDatabaseName(todb), actions)) {
+ if (!AuthorizationSession::get(client)
+ ->isAuthorizedForActionsOnResource(ResourcePattern::forDatabaseName(todb), actions)) {
+ return Status(ErrorCodes::Unauthorized, "Unauthorized");
+ }
+
+ actions.removeAllActions();
+ actions.addAction(ActionType::insert);
+ for (size_t i = 0; i < legalClientSystemCollections.size(); ++i) {
+ if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnNamespace(
+ NamespaceString(todb, legalClientSystemCollections[i]), actions)) {
return Status(ErrorCodes::Unauthorized, "Unauthorized");
}
+ }
+ if (fromSelf) {
+ // If copying from self, also require privileges on source db
actions.removeAllActions();
- actions.addAction(ActionType::insert);
+ actions.addAction(ActionType::find);
+ if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
+ ResourcePattern::forDatabaseName(fromdb), actions)) {
+ return Status(ErrorCodes::Unauthorized, "Unauthorized");
+ }
for (size_t i = 0; i < legalClientSystemCollections.size(); ++i) {
if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnNamespace(
- NamespaceString(todb, legalClientSystemCollections[i]), actions)) {
- return Status(ErrorCodes::Unauthorized, "Unauthorized");
- }
- }
-
- if (fromSelf) {
- // If copying from self, also require privileges on source db
- actions.removeAllActions();
- actions.addAction(ActionType::find);
- if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forDatabaseName(fromdb), actions)) {
+ NamespaceString(fromdb, legalClientSystemCollections[i]), actions)) {
return Status(ErrorCodes::Unauthorized, "Unauthorized");
}
- for (size_t i = 0; i < legalClientSystemCollections.size(); ++i) {
- if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnNamespace(
- NamespaceString(fromdb, legalClientSystemCollections[i]), actions)) {
- return Status(ErrorCodes::Unauthorized, "Unauthorized");
- }
- }
}
- return Status::OK();
}
+ return Status::OK();
+}
-} // namespace copydb
-} // namespace mongo
+} // namespace copydb
+} // namespace mongo
diff --git a/src/mongo/db/commands/copydb_start_commands.cpp b/src/mongo/db/commands/copydb_start_commands.cpp
index 70434e11f1b..078ddca6039 100644
--- a/src/mongo/db/commands/copydb_start_commands.cpp
+++ b/src/mongo/db/commands/copydb_start_commands.cpp
@@ -48,178 +48,172 @@
namespace mongo {
- using std::string;
- using std::stringstream;
+using std::string;
+using std::stringstream;
+
+namespace {
+const auto authConnection = Client::declareDecoration<std::unique_ptr<DBClientBase>>();
+} // namespace
+
+std::unique_ptr<DBClientBase>& CopyDbAuthConnection::forClient(Client* client) {
+ return authConnection(client);
+}
+
+/* Usage:
+ * admindb.$cmd.findOne( { copydbgetnonce: 1, fromhost: <connection string> } );
+ *
+ * Run against the mongod that is the intended target for the "copydb" command. Used to get a
+ * nonce from the source of a "copydb" operation for authentication purposes. See the
+ * description of the "copydb" command below.
+ */
+class CmdCopyDbGetNonce : public Command {
+public:
+ CmdCopyDbGetNonce() : Command("copydbgetnonce") {}
+
+ virtual bool adminOnly() const {
+ return true;
+ }
+
+ virtual bool slaveOk() const {
+ return false;
+ }
- namespace {
- const auto authConnection =
- Client::declareDecoration<std::unique_ptr<DBClientBase>>();
- } // namespace
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
- std::unique_ptr<DBClientBase>& CopyDbAuthConnection::forClient(Client* client) {
- return authConnection(client);
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ // No auth required
}
- /* Usage:
- * admindb.$cmd.findOne( { copydbgetnonce: 1, fromhost: <connection string> } );
- *
- * Run against the mongod that is the intended target for the "copydb" command. Used to get a
- * nonce from the source of a "copydb" operation for authentication purposes. See the
- * description of the "copydb" command below.
- */
- class CmdCopyDbGetNonce : public Command {
- public:
- CmdCopyDbGetNonce() : Command("copydbgetnonce") { }
-
- virtual bool adminOnly() const {
- return true;
+ virtual void help(stringstream& help) const {
+ help << "get a nonce for subsequent copy db request from secure server\n";
+ help << "usage: {copydbgetnonce: 1, fromhost: <hostname>}";
+ }
+
+ virtual bool run(OperationContext* txn,
+ const string&,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ string fromhost = cmdObj.getStringField("fromhost");
+ if (fromhost.empty()) {
+ /* copy from self */
+ stringstream ss;
+ ss << "localhost:" << serverGlobalParams.port;
+ fromhost = ss.str();
}
- virtual bool slaveOk() const {
+ const ConnectionString cs(uassertStatusOK(ConnectionString::parse(fromhost)));
+
+ auto& authConn = CopyDbAuthConnection::forClient(txn->getClient());
+ authConn.reset(cs.connect(errmsg));
+ if (!authConn) {
return false;
}
- virtual bool isWriteCommandForConfigServer() const { return false; }
+ BSONObj ret;
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- // No auth required
+ if (!authConn->runCommand("admin", BSON("getnonce" << 1), ret)) {
+ errmsg = "couldn't get nonce " + ret.toString();
+ authConn.reset();
+ return false;
}
- virtual void help( stringstream &help ) const {
- help << "get a nonce for subsequent copy db request from secure server\n";
- help << "usage: {copydbgetnonce: 1, fromhost: <hostname>}";
- }
+ result.appendElements(ret);
+ return true;
+ }
- virtual bool run(OperationContext* txn,
- const string&,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
-
- string fromhost = cmdObj.getStringField("fromhost");
- if ( fromhost.empty() ) {
- /* copy from self */
- stringstream ss;
- ss << "localhost:" << serverGlobalParams.port;
- fromhost = ss.str();
- }
-
- const ConnectionString cs(uassertStatusOK(ConnectionString::parse(fromhost)));
-
- auto& authConn = CopyDbAuthConnection::forClient(txn->getClient());
- authConn.reset(cs.connect(errmsg));
- if (!authConn) {
- return false;
- }
-
- BSONObj ret;
-
- if( !authConn->runCommand( "admin", BSON( "getnonce" << 1 ), ret ) ) {
- errmsg = "couldn't get nonce " + ret.toString();
- authConn.reset();
- return false;
- }
-
- result.appendElements( ret );
- return true;
+} cmdCopyDBGetNonce;
+
+/* Usage:
+ * admindb.$cmd.findOne( { copydbsaslstart: 1,
+ * fromhost: <connection string>,
+ * mechanism: <String>,
+ * payload: <BinaryOrString> } );
+ *
+ * Run against the mongod that is the intended target for the "copydb" command. Used to
+ * initialize a SASL auth session for a "copydb" operation for authentication purposes.
+ */
+class CmdCopyDbSaslStart : public Command {
+public:
+ CmdCopyDbSaslStart() : Command("copydbsaslstart") {}
+
+ virtual bool adminOnly() const {
+ return true;
+ }
+
+ virtual bool slaveOk() const {
+ return false;
+ }
+
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ // No auth required
+ return Status::OK();
+ }
+
+ virtual void help(stringstream& help) const {
+ help << "Initialize a SASL auth session for subsequent copy db request "
+ "from secure server\n";
+ }
+
+ virtual bool run(OperationContext* txn,
+ const string&,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ const string fromDb = cmdObj.getStringField("fromdb");
+
+ string fromHost = cmdObj.getStringField("fromhost");
+ if (fromHost.empty()) {
+ /* copy from self */
+ stringstream ss;
+ ss << "localhost:" << serverGlobalParams.port;
+ fromHost = ss.str();
}
- } cmdCopyDBGetNonce;
-
- /* Usage:
- * admindb.$cmd.findOne( { copydbsaslstart: 1,
- * fromhost: <connection string>,
- * mechanism: <String>,
- * payload: <BinaryOrString> } );
- *
- * Run against the mongod that is the intended target for the "copydb" command. Used to
- * initialize a SASL auth session for a "copydb" operation for authentication purposes.
- */
- class CmdCopyDbSaslStart : public Command {
- public:
- CmdCopyDbSaslStart() : Command("copydbsaslstart") { }
-
- virtual bool adminOnly() const {
- return true;
+ const ConnectionString cs(uassertStatusOK(ConnectionString::parse(fromHost)));
+
+ BSONElement mechanismElement;
+ Status status = bsonExtractField(cmdObj, saslCommandMechanismFieldName, &mechanismElement);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
}
- virtual bool slaveOk() const {
+ BSONElement payloadElement;
+ status = bsonExtractField(cmdObj, saslCommandPayloadFieldName, &payloadElement);
+ if (!status.isOK()) {
+ log() << "Failed to extract payload: " << status;
return false;
}
- virtual bool isWriteCommandForConfigServer() const { return false; }
-
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- // No auth required
- return Status::OK();
+ auto& authConn = CopyDbAuthConnection::forClient(txn->getClient());
+ authConn.reset(cs.connect(errmsg));
+ if (!authConn.get()) {
+ return false;
}
- virtual void help( stringstream &help ) const {
- help << "Initialize a SASL auth session for subsequent copy db request "
- "from secure server\n";
+ BSONObj ret;
+ if (!authConn->runCommand(
+ fromDb, BSON("saslStart" << 1 << mechanismElement << payloadElement), ret)) {
+ authConn.reset();
+ return appendCommandStatus(result, Command::getStatusFromCommandResult(ret));
}
- virtual bool run(OperationContext* txn,
- const string&,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
-
- const string fromDb = cmdObj.getStringField("fromdb");
-
- string fromHost = cmdObj.getStringField("fromhost");
- if ( fromHost.empty() ) {
- /* copy from self */
- stringstream ss;
- ss << "localhost:" << serverGlobalParams.port;
- fromHost = ss.str();
- }
-
- const ConnectionString cs(uassertStatusOK(ConnectionString::parse(fromHost)));
-
- BSONElement mechanismElement;
- Status status = bsonExtractField(cmdObj,
- saslCommandMechanismFieldName,
- &mechanismElement);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
-
- BSONElement payloadElement;
- status = bsonExtractField(cmdObj, saslCommandPayloadFieldName, &payloadElement);
- if (!status.isOK()) {
- log() << "Failed to extract payload: " << status;
- return false;
- }
-
- auto& authConn = CopyDbAuthConnection::forClient(txn->getClient());
- authConn.reset(cs.connect(errmsg));
- if (!authConn.get()) {
- return false;
- }
-
- BSONObj ret;
- if( !authConn->runCommand( fromDb,
- BSON( "saslStart" << 1 <<
- mechanismElement <<
- payloadElement),
- ret ) ) {
- authConn.reset();
- return appendCommandStatus(result,
- Command::getStatusFromCommandResult(ret));
-
- }
-
- result.appendElements( ret );
- return true;
- }
+ result.appendElements(ret);
+ return true;
+ }
- } cmdCopyDBSaslStart;
+} cmdCopyDBSaslStart;
-} // namespace mongo
+} // namespace mongo
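
For orientation, a minimal client-side sketch of the request shapes the two commands in this file accept. This is a sketch only: it assumes the legacy in-tree C++ driver, hypothetical host and database names, and elides error handling. copydbgetnonce takes just the source host; copydbsaslstart mirrors a regular saslStart with the extra fromhost/fromdb routing fields, as the usage comment above describes:

    #include "mongo/client/dbclientinterface.h"

    using namespace mongo;

    bool primeCopyDbAuth(DBClientConnection& target, BSONObj& ret) {
        // MONGODB-CR path: fetch a nonce from the source host via the target.
        if (!target.runCommand("admin",
                               BSON("copydbgetnonce" << 1 << "fromhost"
                                                     << "src.example.net:27017"),  // hypothetical
                               ret))
            return false;

        // SCRAM path: start a SASL session against the source database instead.
        // "payload" carries the first client SASL message (BinData or string).
        return target.runCommand("admin",
                                 BSON("copydbsaslstart" << 1 << "fromhost"
                                                        << "src.example.net:27017"  // hypothetical
                                                        << "fromdb" << "test"       // hypothetical
                                                        << "mechanism" << "SCRAM-SHA-1"
                                                        << "payload" << ""),
                                 ret);
    }

Either way, the connection is stashed on the client via CopyDbAuthConnection::forClient() so the follow-up copydb command can reuse it.
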
diff --git a/src/mongo/db/commands/copydb_start_commands.h b/src/mongo/db/commands/copydb_start_commands.h
index c4c80e1dbb4..61f3313f918 100644
--- a/src/mongo/db/commands/copydb_start_commands.h
+++ b/src/mongo/db/commands/copydb_start_commands.h
@@ -32,10 +32,10 @@
namespace mongo {
- class Client;
+class Client;
- struct CopyDbAuthConnection {
- static std::unique_ptr<DBClientBase>& forClient(Client* client);
- };
+struct CopyDbAuthConnection {
+ static std::unique_ptr<DBClientBase>& forClient(Client* client);
+};
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/commands/count_cmd.cpp b/src/mongo/db/commands/count_cmd.cpp
index 7f4e0bf3a65..91838325936 100644
--- a/src/mongo/db/commands/count_cmd.cpp
+++ b/src/mongo/db/commands/count_cmd.cpp
@@ -44,121 +44,130 @@
namespace mongo {
namespace {
- using std::unique_ptr;
- using std::string;
- using std::stringstream;
-
- /**
- * Implements the MongoD side of the count command.
- */
- class CmdCount : public Command {
- public:
- virtual bool isWriteCommandForConfigServer() const { return false; }
- CmdCount() : Command("count") { }
- virtual bool slaveOk() const {
- // ok on --slave setups
- return repl::getGlobalReplicationCoordinator()->getSettings().slave == repl::SimpleSlave;
+using std::unique_ptr;
+using std::string;
+using std::stringstream;
+
+/**
+ * Implements the MongoD side of the count command.
+ */
+class CmdCount : public Command {
+public:
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ CmdCount() : Command("count") {}
+ virtual bool slaveOk() const {
+ // ok on --slave setups
+ return repl::getGlobalReplicationCoordinator()->getSettings().slave == repl::SimpleSlave;
+ }
+ virtual bool slaveOverrideOk() const {
+ return true;
+ }
+ virtual bool maintenanceOk() const {
+ return false;
+ }
+ virtual bool adminOnly() const {
+ return false;
+ }
+ virtual void help(stringstream& help) const {
+ help << "count objects in collection";
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::find);
+ out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
+ }
+
+ virtual Status explain(OperationContext* txn,
+ const std::string& dbname,
+ const BSONObj& cmdObj,
+ ExplainCommon::Verbosity verbosity,
+ BSONObjBuilder* out) const {
+ auto request = CountRequest::parseFromBSON(dbname, cmdObj);
+ if (!request.isOK()) {
+ return request.getStatus();
}
- virtual bool slaveOverrideOk() const { return true; }
- virtual bool maintenanceOk() const { return false; }
- virtual bool adminOnly() const { return false; }
- virtual void help( stringstream& help ) const { help << "count objects in collection"; }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::find);
- out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
+
+ // Acquire the db read lock.
+ AutoGetCollectionForRead ctx(txn, request.getValue().getNs());
+ Collection* collection = ctx.getCollection();
+
+ // Prevent chunks from being cleaned up during yields - this allows us to only check the
+ // version on initial entry into count.
+ RangePreserver preserver(collection);
+
+ PlanExecutor* rawExec;
+ Status getExecStatus = getExecutorCount(txn,
+ collection,
+ request.getValue(),
+ true, // explain
+ PlanExecutor::YIELD_AUTO,
+ &rawExec);
+ if (!getExecStatus.isOK()) {
+ return getExecStatus;
}
- virtual Status explain(OperationContext* txn,
- const std::string& dbname,
- const BSONObj& cmdObj,
- ExplainCommon::Verbosity verbosity,
- BSONObjBuilder* out) const {
-
- auto request = CountRequest::parseFromBSON(dbname, cmdObj);
- if (!request.isOK()) {
- return request.getStatus();
- }
-
- // Acquire the db read lock.
- AutoGetCollectionForRead ctx(txn, request.getValue().getNs());
- Collection* collection = ctx.getCollection();
-
- // Prevent chunks from being cleaned up during yields - this allows us to only check the
- // version on initial entry into count.
- RangePreserver preserver(collection);
-
- PlanExecutor* rawExec;
- Status getExecStatus = getExecutorCount(txn,
- collection,
- request.getValue(),
- true, // explain
- PlanExecutor::YIELD_AUTO,
- &rawExec);
- if (!getExecStatus.isOK()) {
- return getExecStatus;
- }
-
- unique_ptr<PlanExecutor> exec(rawExec);
-
- Explain::explainStages(exec.get(), verbosity, out);
- return Status::OK();
+ unique_ptr<PlanExecutor> exec(rawExec);
+
+ Explain::explainStages(exec.get(), verbosity, out);
+ return Status::OK();
+ }
+
+ virtual bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ auto request = CountRequest::parseFromBSON(dbname, cmdObj);
+ if (!request.isOK()) {
+ return appendCommandStatus(result, request.getStatus());
}
- virtual bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int, string& errmsg,
- BSONObjBuilder& result) {
-
- auto request = CountRequest::parseFromBSON(dbname, cmdObj);
- if (!request.isOK()) {
- return appendCommandStatus(result, request.getStatus());
- }
-
- AutoGetCollectionForRead ctx(txn, request.getValue().getNs());
- Collection* collection = ctx.getCollection();
-
- // Prevent chunks from being cleaned up during yields - this allows us to only check the
- // version on initial entry into count.
- RangePreserver preserver(collection);
-
- PlanExecutor* rawExec;
- Status getExecStatus = getExecutorCount(txn,
- collection,
- request.getValue(),
- false, // !explain
- PlanExecutor::YIELD_AUTO,
- &rawExec);
- if (!getExecStatus.isOK()) {
- return appendCommandStatus(result, getExecStatus);
- }
-
- unique_ptr<PlanExecutor> exec(rawExec);
-
- // Store the plan summary string in CurOp.
- if (NULL != CurOp::get(txn)) {
- CurOp::get(txn)->debug().planSummary = Explain::getPlanSummary(exec.get());
- }
-
- Status execPlanStatus = exec->executePlan();
- if (!execPlanStatus.isOK()) {
- return appendCommandStatus(result, execPlanStatus);
- }
-
- // Plan is done executing. We just need to pull the count out of the root stage.
- invariant(STAGE_COUNT == exec->getRootStage()->stageType());
- CountStage* countStage = static_cast<CountStage*>(exec->getRootStage());
- const CountStats* countStats =
- static_cast<const CountStats*>(countStage->getSpecificStats());
-
- result.appendNumber("n", countStats->nCounted);
- return true;
+ AutoGetCollectionForRead ctx(txn, request.getValue().getNs());
+ Collection* collection = ctx.getCollection();
+
+ // Prevent chunks from being cleaned up during yields - this allows us to only check the
+ // version on initial entry into count.
+ RangePreserver preserver(collection);
+
+ PlanExecutor* rawExec;
+ Status getExecStatus = getExecutorCount(txn,
+ collection,
+ request.getValue(),
+ false, // !explain
+ PlanExecutor::YIELD_AUTO,
+ &rawExec);
+ if (!getExecStatus.isOK()) {
+ return appendCommandStatus(result, getExecStatus);
}
- } cmdCount;
+ unique_ptr<PlanExecutor> exec(rawExec);
+
+ // Store the plan summary string in CurOp.
+ if (NULL != CurOp::get(txn)) {
+ CurOp::get(txn)->debug().planSummary = Explain::getPlanSummary(exec.get());
+ }
+
+ Status execPlanStatus = exec->executePlan();
+ if (!execPlanStatus.isOK()) {
+ return appendCommandStatus(result, execPlanStatus);
+ }
+
+ // Plan is done executing. We just need to pull the count out of the root stage.
+ invariant(STAGE_COUNT == exec->getRootStage()->stageType());
+ CountStage* countStage = static_cast<CountStage*>(exec->getRootStage());
+ const CountStats* countStats =
+ static_cast<const CountStats*>(countStage->getSpecificStats());
+
+ result.appendNumber("n", countStats->nCounted);
+ return true;
+ }
+
+} cmdCount;
-} // namespace
-} // namespace mongo
+} // namespace
+} // namespace mongo
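
For reference, a minimal sketch (legacy C++ driver; connection and namespace are hypothetical) of the command shape that CmdCount::run() above parses via CountRequest::parseFromBSON(), and where the "n" it appends ends up. explain() shares the same parse/lock/getExecutorCount setup, differing only in the explain flag passed to getExecutorCount():

    #include "mongo/client/dbclientinterface.h"

    using namespace mongo;

    long long countBar(DBClientConnection& conn) {
        BSONObj res;
        // "count" names the collection; "query" is optional and defaults to {}.
        if (!conn.runCommand("test", BSON("count" << "bar" << "query" << BSONObj()), res))
            return -1;  // command failed; details are in res
        return res["n"].numberLong();  // appended from CountStats::nCounted in run()
    }
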
diff --git a/src/mongo/db/commands/cpuprofile.cpp b/src/mongo/db/commands/cpuprofile.cpp
index b50dd0233a5..4273666a70f 100644
--- a/src/mongo/db/commands/cpuprofile.cpp
+++ b/src/mongo/db/commands/cpuprofile.cpp
@@ -42,9 +42,9 @@
* The commands defined here, and profiling, are only available when enabled at
* build-time with the "--use-cpu-profiler" argument to scons.
*
- * Example SCons command line:
+ * Example SCons command line:
*
- * scons --release --use-cpu-profiler
+ * scons --release --use-cpu-profiler
*/
#include "gperftools/profiler.h"
@@ -63,102 +63,109 @@
namespace mongo {
- namespace {
-
- /**
- * Common code for the implementation of cpu profiler commands.
- */
- class CpuProfilerCommand : public Command {
- public:
- CpuProfilerCommand( char const *name ) : Command( name ) {}
- virtual bool slaveOk() const { return true; }
- virtual bool adminOnly() const { return true; }
- virtual bool localHostOnlyIfNoAuth( const BSONObj& cmdObj ) { return true; }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::cpuProfiler);
- out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
- }
-
- // This is an abuse of the global dbmutex. We only really need to
- // ensure that only one cpuprofiler command runs at once; it would
- // be fine for it to run concurrently with other operations.
- virtual bool isWriteCommandForConfigServer() const { return true; }
- };
-
- /**
- * Class providing implementation of the _cpuProfilerStart command.
- */
- class CpuProfilerStartCommand : public CpuProfilerCommand {
- public:
- CpuProfilerStartCommand() : CpuProfilerCommand( commandName ) {}
-
- virtual bool run( OperationContext* txn,
- std::string const &db,
- BSONObj &cmdObj,
- int options,
- std::string &errmsg,
- BSONObjBuilder &result);
-
- static char const *const commandName;
- } cpuProfilerStartCommandInstance;
-
- /**
- * Class providing implementation of the _cpuProfilerStop command.
- */
- class CpuProfilerStopCommand : public CpuProfilerCommand {
- public:
- CpuProfilerStopCommand() : CpuProfilerCommand( commandName ) {}
-
- virtual bool run( OperationContext* txn,
- std::string const &db,
- BSONObj &cmdObj,
- int options,
- std::string &errmsg,
- BSONObjBuilder &result);
-
- static char const *const commandName;
- } cpuProfilerStopCommandInstance;
-
- char const *const CpuProfilerStartCommand::commandName = "_cpuProfilerStart";
- char const *const CpuProfilerStopCommand::commandName = "_cpuProfilerStop";
-
- bool CpuProfilerStartCommand::run( OperationContext* txn,
- std::string const &db,
- BSONObj &cmdObj,
- int options,
- std::string &errmsg,
- BSONObjBuilder &result) {
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock dbXLock(txn->lockState(), db, MODE_X);
- // The lock here is just to prevent concurrency, nothing will write.
- OldClientContext ctx(txn, db);
-
- std::string profileFilename = cmdObj[commandName]["profileFilename"].String();
- if ( ! ::ProfilerStart( profileFilename.c_str() ) ) {
- errmsg = "Failed to start profiler";
- return false;
- }
- return true;
- }
-
- bool CpuProfilerStopCommand::run( OperationContext* txn,
- std::string const &db,
- BSONObj &cmdObj,
- int options,
- std::string &errmsg,
- BSONObjBuilder &result) {
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock dbXLock(txn->lockState(), db, MODE_X);
- OldClientContext ctx(txn, db);
-
- ::ProfilerStop();
- return true;
- }
-
- } // namespace
+namespace {
-} // namespace mongo
+/**
+ * Common code for the implementation of cpu profiler commands.
+ */
+class CpuProfilerCommand : public Command {
+public:
+ CpuProfilerCommand(char const* name) : Command(name) {}
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual bool adminOnly() const {
+ return true;
+ }
+ virtual bool localHostOnlyIfNoAuth(const BSONObj& cmdObj) {
+ return true;
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::cpuProfiler);
+ out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
+ }
+
+ // This is an abuse of the global dbmutex. We only really need to
+ // ensure that only one cpuprofiler command runs at once; it would
+ // be fine for it to run concurrently with other operations.
+ virtual bool isWriteCommandForConfigServer() const {
+ return true;
+ }
+};
+
+/**
+ * Class providing implementation of the _cpuProfilerStart command.
+ */
+class CpuProfilerStartCommand : public CpuProfilerCommand {
+public:
+ CpuProfilerStartCommand() : CpuProfilerCommand(commandName) {}
+ virtual bool run(OperationContext* txn,
+ std::string const& db,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result);
+
+ static char const* const commandName;
+} cpuProfilerStartCommandInstance;
+
+/**
+ * Class providing implementation of the _cpuProfilerStop command.
+ */
+class CpuProfilerStopCommand : public CpuProfilerCommand {
+public:
+ CpuProfilerStopCommand() : CpuProfilerCommand(commandName) {}
+
+ virtual bool run(OperationContext* txn,
+ std::string const& db,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result);
+
+ static char const* const commandName;
+} cpuProfilerStopCommandInstance;
+
+char const* const CpuProfilerStartCommand::commandName = "_cpuProfilerStart";
+char const* const CpuProfilerStopCommand::commandName = "_cpuProfilerStop";
+
+bool CpuProfilerStartCommand::run(OperationContext* txn,
+ std::string const& db,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result) {
+ ScopedTransaction transaction(txn, MODE_IX);
+ Lock::DBLock dbXLock(txn->lockState(), db, MODE_X);
+ // The lock here is just to prevent concurrency, nothing will write.
+ OldClientContext ctx(txn, db);
+
+ std::string profileFilename = cmdObj[commandName]["profileFilename"].String();
+ if (!::ProfilerStart(profileFilename.c_str())) {
+ errmsg = "Failed to start profiler";
+ return false;
+ }
+ return true;
+}
+
+bool CpuProfilerStopCommand::run(OperationContext* txn,
+ std::string const& db,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result) {
+ ScopedTransaction transaction(txn, MODE_IX);
+ Lock::DBLock dbXLock(txn->lockState(), db, MODE_X);
+ OldClientContext ctx(txn, db);
+
+ ::ProfilerStop();
+ return true;
+}
+
+} // namespace
+
+} // namespace mongo
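
A minimal sketch of driving these commands from a client (legacy C++ driver; the output path is hypothetical, and the commands only exist on builds configured with --use-cpu-profiler). The nested "profileFilename" field matches the cmdObj[commandName]["profileFilename"] lookup in CpuProfilerStartCommand::run() above, and both commands are adminOnly:

    #include "mongo/client/dbclientinterface.h"

    using namespace mongo;

    void profileWorkload(DBClientConnection& conn) {
        BSONObj res;
        conn.runCommand("admin",
                        BSON("_cpuProfilerStart"
                             << BSON("profileFilename" << "/tmp/mongod.prof")),  // hypothetical
                        res);
        // ... drive the workload to be profiled ...
        conn.runCommand("admin", BSON("_cpuProfilerStop" << 1), res);
    }
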
diff --git a/src/mongo/db/commands/create_indexes.cpp b/src/mongo/db/commands/create_indexes.cpp
index 83dc5e72177..c18c33b7ce4 100644
--- a/src/mongo/db/commands/create_indexes.cpp
+++ b/src/mongo/db/commands/create_indexes.cpp
@@ -52,251 +52,261 @@
namespace mongo {
- using std::string;
-
- /**
- * { createIndexes : "bar", indexes : [ { ns : "test.bar", key : { x : 1 }, name: "x_1" } ] }
- */
- class CmdCreateIndex : public Command {
- public:
- CmdCreateIndex() : Command( "createIndexes" ){}
-
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual bool slaveOk() const { return false; } // TODO: this could be made true...
-
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- ActionSet actions;
- actions.addAction(ActionType::createIndex);
- Privilege p(parseResourcePattern(dbname, cmdObj), actions);
- if (AuthorizationSession::get(client)->isAuthorizedForPrivilege(p))
- return Status::OK();
- return Status(ErrorCodes::Unauthorized, "Unauthorized");
- }
-
+using std::string;
- BSONObj _addNsToSpec( const NamespaceString& ns, const BSONObj& obj ) {
- BSONObjBuilder b;
- b.append( "ns", ns );
- b.appendElements( obj );
- return b.obj();
+/**
+ * { createIndexes : "bar", indexes : [ { ns : "test.bar", key : { x : 1 }, name: "x_1" } ] }
+ */
+class CmdCreateIndex : public Command {
+public:
+ CmdCreateIndex() : Command("createIndexes") {}
+
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual bool slaveOk() const {
+ return false;
+ } // TODO: this could be made true...
+
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ ActionSet actions;
+ actions.addAction(ActionType::createIndex);
+ Privilege p(parseResourcePattern(dbname, cmdObj), actions);
+ if (AuthorizationSession::get(client)->isAuthorizedForPrivilege(p))
+ return Status::OK();
+ return Status(ErrorCodes::Unauthorized, "Unauthorized");
+ }
+
+
+ BSONObj _addNsToSpec(const NamespaceString& ns, const BSONObj& obj) {
+ BSONObjBuilder b;
+ b.append("ns", ns);
+ b.appendElements(obj);
+ return b.obj();
+ }
+
+ virtual bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ // --- parse
+
+ NamespaceString ns(dbname, cmdObj[name].String());
+ Status status = userAllowedWriteNS(ns);
+ if (!status.isOK())
+ return appendCommandStatus(result, status);
+
+ if (cmdObj["indexes"].type() != Array) {
+ errmsg = "indexes has to be an array";
+ result.append("cmdObj", cmdObj);
+ return false;
}
- virtual bool run(OperationContext* txn, const string& dbname, BSONObj& cmdObj, int options,
- string& errmsg, BSONObjBuilder& result) {
- // --- parse
-
- NamespaceString ns( dbname, cmdObj[name].String() );
- Status status = userAllowedWriteNS( ns );
- if ( !status.isOK() )
- return appendCommandStatus( result, status );
-
- if ( cmdObj["indexes"].type() != Array ) {
- errmsg = "indexes has to be an array";
- result.append( "cmdObj", cmdObj );
- return false;
+ std::vector<BSONObj> specs;
+ {
+ BSONObjIterator i(cmdObj["indexes"].Obj());
+ while (i.more()) {
+ BSONElement e = i.next();
+ if (e.type() != Object) {
+ errmsg = "everything in indexes has to be an Object";
+ result.append("cmdObj", cmdObj);
+ return false;
+ }
+ specs.push_back(e.Obj());
}
+ }
- std::vector<BSONObj> specs;
- {
- BSONObjIterator i( cmdObj["indexes"].Obj() );
- while ( i.more() ) {
- BSONElement e = i.next();
- if ( e.type() != Object ) {
- errmsg = "everything in indexes has to be an Object";
- result.append( "cmdObj", cmdObj );
- return false;
- }
- specs.push_back( e.Obj() );
- }
+ if (specs.size() == 0) {
+ errmsg = "no indexes to add";
+ return false;
+ }
+
+ // check specs
+ for (size_t i = 0; i < specs.size(); i++) {
+ BSONObj spec = specs[i];
+ if (spec["ns"].eoo()) {
+ spec = _addNsToSpec(ns, spec);
+ specs[i] = spec;
}
- if ( specs.size() == 0 ) {
- errmsg = "no indexes to add";
+ if (spec["ns"].type() != String) {
+ errmsg = "spec has no ns";
+ result.append("spec", spec);
return false;
}
-
- // check specs
- for ( size_t i = 0; i < specs.size(); i++ ) {
- BSONObj spec = specs[i];
- if ( spec["ns"].eoo() ) {
- spec = _addNsToSpec( ns, spec );
- specs[i] = spec;
- }
-
- if ( spec["ns"].type() != String ) {
- errmsg = "spec has no ns";
- result.append( "spec", spec );
- return false;
- }
- if ( ns != spec["ns"].String() ) {
- errmsg = "namespace mismatch";
- result.append( "spec", spec );
- return false;
- }
+ if (ns != spec["ns"].String()) {
+ errmsg = "namespace mismatch";
+ result.append("spec", spec);
+ return false;
}
+ }
- // now we know we have to create index(es)
- // Note: createIndexes command does not currently respect shard versioning.
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock dbLock(txn->lockState(), ns.db(), MODE_X);
- if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(ns)) {
- return appendCommandStatus(result, Status(ErrorCodes::NotMaster, str::stream()
- << "Not primary while creating indexes in " << ns.ns()));
- }
+ // now we know we have to create index(es)
+ // Note: createIndexes command does not currently respect shard versioning.
+ ScopedTransaction transaction(txn, MODE_IX);
+ Lock::DBLock dbLock(txn->lockState(), ns.db(), MODE_X);
+ if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(ns)) {
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::NotMaster,
+ str::stream() << "Not primary while creating indexes in " << ns.ns()));
+ }
- Database* db = dbHolder().get(txn, ns.db());
- if (!db) {
- db = dbHolder().openDb(txn, ns.db());
- }
+ Database* db = dbHolder().get(txn, ns.db());
+ if (!db) {
+ db = dbHolder().openDb(txn, ns.db());
+ }
- Collection* collection = db->getCollection( ns.ns() );
- result.appendBool( "createdCollectionAutomatically", collection == NULL );
- if ( !collection ) {
- MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- WriteUnitOfWork wunit(txn);
- collection = db->createCollection(txn, ns.ns(), CollectionOptions());
- invariant( collection );
- wunit.commit();
- } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "createIndexes", ns.ns());
+ Collection* collection = db->getCollection(ns.ns());
+ result.appendBool("createdCollectionAutomatically", collection == NULL);
+ if (!collection) {
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ WriteUnitOfWork wunit(txn);
+ collection = db->createCollection(txn, ns.ns(), CollectionOptions());
+ invariant(collection);
+ wunit.commit();
}
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "createIndexes", ns.ns());
+ }
- const int numIndexesBefore = collection->getIndexCatalog()->numIndexesTotal(txn);
- result.append("numIndexesBefore", numIndexesBefore);
+ const int numIndexesBefore = collection->getIndexCatalog()->numIndexesTotal(txn);
+ result.append("numIndexesBefore", numIndexesBefore);
- MultiIndexBlock indexer(txn, collection);
- indexer.allowBackgroundBuilding();
- indexer.allowInterruption();
+ MultiIndexBlock indexer(txn, collection);
+ indexer.allowBackgroundBuilding();
+ indexer.allowInterruption();
- const size_t origSpecsSize = specs.size();
- indexer.removeExistingIndexes(&specs);
+ const size_t origSpecsSize = specs.size();
+ indexer.removeExistingIndexes(&specs);
- if (specs.size() == 0) {
- result.append("numIndexesAfter", numIndexesBefore);
- result.append( "note", "all indexes already exist" );
- return true;
- }
+ if (specs.size() == 0) {
+ result.append("numIndexesAfter", numIndexesBefore);
+ result.append("note", "all indexes already exist");
+ return true;
+ }
- if (specs.size() != origSpecsSize) {
- result.append( "note", "index already exists" );
- }
+ if (specs.size() != origSpecsSize) {
+ result.append("note", "index already exists");
+ }
- for ( size_t i = 0; i < specs.size(); i++ ) {
- const BSONObj& spec = specs[i];
- if ( spec["unique"].trueValue() ) {
- status = checkUniqueIndexConstraints(txn, ns.ns(), spec["key"].Obj());
+ for (size_t i = 0; i < specs.size(); i++) {
+ const BSONObj& spec = specs[i];
+ if (spec["unique"].trueValue()) {
+ status = checkUniqueIndexConstraints(txn, ns.ns(), spec["key"].Obj());
- if ( !status.isOK() ) {
- appendCommandStatus( result, status );
- return false;
- }
+ if (!status.isOK()) {
+ appendCommandStatus(result, status);
+ return false;
}
}
+ }
- MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- uassertStatusOK(indexer.init(specs));
- } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "createIndexes", ns.ns());
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ uassertStatusOK(indexer.init(specs));
+ }
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "createIndexes", ns.ns());
- // If we're a background index, replace exclusive db lock with an intent lock, so that
- // other readers and writers can proceed during this phase.
- if (indexer.getBuildInBackground()) {
- txn->recoveryUnit()->abandonSnapshot();
- dbLock.relockWithMode(MODE_IX);
- if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(ns)) {
- return appendCommandStatus(result, Status(ErrorCodes::NotMaster, str::stream()
- << "Not primary while creating background indexes in " << ns.ns()));
- }
+ // If we're a background index, replace exclusive db lock with an intent lock, so that
+ // other readers and writers can proceed during this phase.
+ if (indexer.getBuildInBackground()) {
+ txn->recoveryUnit()->abandonSnapshot();
+ dbLock.relockWithMode(MODE_IX);
+ if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(ns)) {
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::NotMaster,
+ str::stream() << "Not primary while creating background indexes in "
+ << ns.ns()));
}
+ }
- try {
- Lock::CollectionLock colLock(txn->lockState(), ns.ns(), MODE_IX);
- uassertStatusOK(indexer.insertAllDocumentsInCollection());
- }
- catch (const DBException& e) {
- invariant(e.getCode() != ErrorCodes::WriteConflict);
- // Must have exclusive DB lock before we clean up the index build via the
- // destructor of 'indexer'.
- if (indexer.getBuildInBackground()) {
- try {
- // This function cannot throw today, but we will preemptively prepare for
- // that day, to avoid data corruption due to lack of index cleanup.
- txn->recoveryUnit()->abandonSnapshot();
- dbLock.relockWithMode(MODE_X);
- if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(ns)) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::NotMaster, str::stream()
- << "Not primary while creating background indexes in "
- << ns.ns() << ": cleaning up index build failure due to "
- << e.toString()));
- }
- }
- catch (...) {
- std::terminate();
+ try {
+ Lock::CollectionLock colLock(txn->lockState(), ns.ns(), MODE_IX);
+ uassertStatusOK(indexer.insertAllDocumentsInCollection());
+ } catch (const DBException& e) {
+ invariant(e.getCode() != ErrorCodes::WriteConflict);
+ // Must have exclusive DB lock before we clean up the index build via the
+ // destructor of 'indexer'.
+ if (indexer.getBuildInBackground()) {
+ try {
+ // This function cannot throw today, but we will preemptively prepare for
+ // that day, to avoid data corruption due to lack of index cleanup.
+ txn->recoveryUnit()->abandonSnapshot();
+ dbLock.relockWithMode(MODE_X);
+ if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(ns)) {
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::NotMaster,
+ str::stream()
+ << "Not primary while creating background indexes in "
+ << ns.ns() << ": cleaning up index build failure due to "
+ << e.toString()));
}
+ } catch (...) {
+ std::terminate();
}
- throw;
}
- // Need to return db lock back to exclusive, to complete the index build.
- if (indexer.getBuildInBackground()) {
- txn->recoveryUnit()->abandonSnapshot();
- dbLock.relockWithMode(MODE_X);
- uassert(ErrorCodes::NotMaster,
- str::stream() << "Not primary while completing index build in " << dbname,
- repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(ns));
-
- Database* db = dbHolder().get(txn, ns.db());
- uassert(28551, "database dropped during index build", db);
- uassert(28552, "collection dropped during index build",
- db->getCollection(ns.ns()));
- }
-
- MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- WriteUnitOfWork wunit(txn);
+ throw;
+ }
+ // Need to return db lock back to exclusive, to complete the index build.
+ if (indexer.getBuildInBackground()) {
+ txn->recoveryUnit()->abandonSnapshot();
+ dbLock.relockWithMode(MODE_X);
+ uassert(ErrorCodes::NotMaster,
+ str::stream() << "Not primary while completing index build in " << dbname,
+ repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(ns));
- indexer.commit();
+ Database* db = dbHolder().get(txn, ns.db());
+ uassert(28551, "database dropped during index build", db);
+ uassert(28552, "collection dropped during index build", db->getCollection(ns.ns()));
+ }
- for ( size_t i = 0; i < specs.size(); i++ ) {
- std::string systemIndexes = ns.getSystemIndexesCollection();
- getGlobalServiceContext()->getOpObserver()->onCreateIndex(txn,
- systemIndexes,
- specs[i]);
- }
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ WriteUnitOfWork wunit(txn);
- wunit.commit();
- } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "createIndexes", ns.ns());
+ indexer.commit();
- result.append( "numIndexesAfter", collection->getIndexCatalog()->numIndexesTotal(txn) );
+ for (size_t i = 0; i < specs.size(); i++) {
+ std::string systemIndexes = ns.getSystemIndexesCollection();
+ getGlobalServiceContext()->getOpObserver()->onCreateIndex(
+ txn, systemIndexes, specs[i]);
+ }
- return true;
+ wunit.commit();
}
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "createIndexes", ns.ns());
- private:
- static Status checkUniqueIndexConstraints(OperationContext* txn,
- StringData ns,
- const BSONObj& newIdxKey) {
+ result.append("numIndexesAfter", collection->getIndexCatalog()->numIndexesTotal(txn));
- invariant(txn->lockState()->isCollectionLockedForMode(ns, MODE_X));
+ return true;
+ }
- if ( shardingState.enabled() ) {
- CollectionMetadataPtr metadata(
- shardingState.getCollectionMetadata( ns.toString() ));
+private:
+ static Status checkUniqueIndexConstraints(OperationContext* txn,
+ StringData ns,
+ const BSONObj& newIdxKey) {
+ invariant(txn->lockState()->isCollectionLockedForMode(ns, MODE_X));
- if ( metadata ) {
- ShardKeyPattern shardKeyPattern(metadata->getKeyPattern());
- if (!shardKeyPattern.isUniqueIndexCompatible(newIdxKey)) {
- return Status(ErrorCodes::CannotCreateIndex,
- str::stream() << "cannot create unique index over " << newIdxKey
- << " with shard key pattern "
- << shardKeyPattern.toBSON());
- }
+ if (shardingState.enabled()) {
+ CollectionMetadataPtr metadata(shardingState.getCollectionMetadata(ns.toString()));
+
+ if (metadata) {
+ ShardKeyPattern shardKeyPattern(metadata->getKeyPattern());
+ if (!shardKeyPattern.isUniqueIndexCompatible(newIdxKey)) {
+ return Status(ErrorCodes::CannotCreateIndex,
+ str::stream() << "cannot create unique index over " << newIdxKey
+ << " with shard key pattern "
+ << shardKeyPattern.toBSON());
}
}
-
- return Status::OK();
}
- } cmdCreateIndex;
+ return Status::OK();
+ }
+} cmdCreateIndex;
}
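
For reference, a minimal sketch (legacy C++ driver; hypothetical connection) of the wire shape given in the class comment above. "ns" may be omitted from a spec: run() calls _addNsToSpec() to fill it in, then rejects any spec whose "ns" disagrees with the command's namespace:

    #include "mongo/client/dbclientinterface.h"

    using namespace mongo;

    bool createXIndex(DBClientConnection& conn) {
        BSONObj res;
        BSONObj cmd = BSON("createIndexes" << "bar"
                           << "indexes"
                           << BSON_ARRAY(BSON("key" << BSON("x" << 1)
                                              << "name" << "x_1")));
        return conn.runCommand("test", cmd, res);
        // On success res carries numIndexesBefore/numIndexesAfter, plus a
        // "note" when some or all of the requested indexes already existed.
    }
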
diff --git a/src/mongo/db/commands/current_op.cpp b/src/mongo/db/commands/current_op.cpp
index d28cb91874e..5107a43d5c5 100644
--- a/src/mongo/db/commands/current_op.cpp
+++ b/src/mongo/db/commands/current_op.cpp
@@ -48,114 +48,115 @@
namespace mongo {
- class CurrentOpCommand : public Command {
- public:
-
- CurrentOpCommand() : Command("currentOp") {}
+class CurrentOpCommand : public Command {
+public:
+ CurrentOpCommand() : Command("currentOp") {}
+
+ bool isWriteCommandForConfigServer() const final {
+ return false;
+ }
+
+ bool slaveOk() const final {
+ return true;
+ }
+
+ bool adminOnly() const final {
+ return true;
+ }
+
+ Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) final {
+ bool isAuthorized = AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
+ ResourcePattern::forClusterResource(), ActionType::inprog);
+ return isAuthorized ? Status::OK() : Status(ErrorCodes::Unauthorized, "Unauthorized");
+ }
+
+ bool run(OperationContext* txn,
+ const std::string& db,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result) final {
+ const bool includeAll = cmdObj["$all"].trueValue();
+
+ // Filter the output
+ BSONObj filter;
+ {
+ BSONObjBuilder b;
+ BSONObjIterator i(cmdObj);
+ invariant(i.more());
+ i.next(); // skip {currentOp: 1} which is required to be the first element
+ while (i.more()) {
+ BSONElement e = i.next();
+ if (str::equals("$all", e.fieldName())) {
+ continue;
+ }
- bool isWriteCommandForConfigServer() const final { return false; }
+ b.append(e);
+ }
+ filter = b.obj();
+ }
- bool slaveOk() const final { return true; }
+ const WhereCallbackReal whereCallback(txn, db);
+ const Matcher matcher(filter, whereCallback);
- bool adminOnly() const final { return true; }
+ BSONArrayBuilder inprogBuilder(result.subarrayStart("inprog"));
- Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) final {
+ for (ServiceContext::LockedClientsCursor cursor(txn->getClient()->getServiceContext());
+ Client* client = cursor.next();) {
+ invariant(client);
- bool isAuthorized = AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forClusterResource(),
- ActionType::inprog);
- return isAuthorized ? Status::OK() : Status(ErrorCodes::Unauthorized, "Unauthorized");
- }
+ stdx::lock_guard<Client> lk(*client);
+ const OperationContext* opCtx = client->getOperationContext();
- bool run(OperationContext* txn,
- const std::string& db,
- BSONObj& cmdObj,
- int options,
- std::string& errmsg,
- BSONObjBuilder& result) final {
-
- const bool includeAll = cmdObj["$all"].trueValue();
-
- // Filter the output
- BSONObj filter;
- {
- BSONObjBuilder b;
- BSONObjIterator i(cmdObj);
- invariant(i.more());
- i.next(); // skip {currentOp: 1} which is required to be the first element
- while (i.more()) {
- BSONElement e = i.next();
- if (str::equals("$all", e.fieldName())) {
- continue;
- }
-
- b.append(e);
- }
- filter = b.obj();
+ if (!includeAll) {
+ // Skip over inactive connections.
+ if (!opCtx)
+ continue;
}
- const WhereCallbackReal whereCallback(txn, db);
- const Matcher matcher(filter, whereCallback);
-
- BSONArrayBuilder inprogBuilder(result.subarrayStart("inprog"));
-
- for (ServiceContext::LockedClientsCursor cursor(txn->getClient()->getServiceContext());
- Client* client = cursor.next();) {
+ BSONObjBuilder infoBuilder;
- invariant(client);
+ // The client information
+ client->reportState(infoBuilder);
- stdx::lock_guard<Client> lk(*client);
- const OperationContext* opCtx = client->getOperationContext();
-
- if (!includeAll) {
- // Skip over inactive connections.
- if (!opCtx)
- continue;
+ // Operation context specific information
+ infoBuilder.appendBool("active", static_cast<bool>(opCtx));
+ if (opCtx) {
+ infoBuilder.append("opid", opCtx->getOpID());
+ if (opCtx->isKillPending()) {
+ infoBuilder.append("killPending", true);
}
- BSONObjBuilder infoBuilder;
-
- // The client information
- client->reportState(infoBuilder);
+ CurOp::get(opCtx)->reportState(&infoBuilder);
- // Operation context specific information
- infoBuilder.appendBool("active", static_cast<bool>(opCtx));
- if (opCtx) {
- infoBuilder.append("opid", opCtx->getOpID());
- if (opCtx->isKillPending()) {
- infoBuilder.append("killPending", true);
- }
-
- CurOp::get(opCtx)->reportState(&infoBuilder);
-
- // LockState
- Locker::LockerInfo lockerInfo;
- opCtx->lockState()->getLockerInfo(&lockerInfo);
- fillLockerInfo(lockerInfo, infoBuilder);
- }
+ // LockState
+ Locker::LockerInfo lockerInfo;
+ opCtx->lockState()->getLockerInfo(&lockerInfo);
+ fillLockerInfo(lockerInfo, infoBuilder);
+ }
- infoBuilder.done();
+ infoBuilder.done();
- const BSONObj info = infoBuilder.obj();
+ const BSONObj info = infoBuilder.obj();
- if (includeAll || matcher.matches(info)) {
- inprogBuilder.append(info);
- }
+ if (includeAll || matcher.matches(info)) {
+ inprogBuilder.append(info);
}
+ }
- inprogBuilder.done();
-
- if (lockedForWriting()) {
- result.append("fsyncLock", true);
- result.append("info",
- "use db.fsyncUnlock() to terminate the fsync write/snapshot lock");
- }
+ inprogBuilder.done();
- return true;
+ if (lockedForWriting()) {
+ result.append("fsyncLock", true);
+ result.append("info",
+ "use db.fsyncUnlock() to terminate the fsync write/snapshot lock");
}
- } currentOpCommand;
+ return true;
+ }
+
+} currentOpCommand;
} // namespace mongo
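
A minimal client-side sketch (legacy C++ driver; hedged, not the only way to call this) of the filtering contract implemented above: every field after the leading {currentOp: 1}, except "$all", is compiled into the Matcher and applied server-side to each per-client info document:

    #include <iostream>

    #include "mongo/client/dbclientinterface.h"

    using namespace mongo;

    void dumpActiveOps(DBClientConnection& conn) {
        BSONObj res;
        // "active" is one of the fields run() reports, so it can be matched on.
        conn.runCommand("admin", BSON("currentOp" << 1 << "active" << true), res);

        // res["inprog"] holds one document per matching client, plus
        // "fsyncLock"/"info" when the server is fsync-locked.
        std::cout << res["inprog"] << std::endl;
    }
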
diff --git a/src/mongo/db/commands/dbhash.cpp b/src/mongo/db/commands/dbhash.cpp
index 14a4ab955d5..dd9db449300 100644
--- a/src/mongo/db/commands/dbhash.cpp
+++ b/src/mongo/db/commands/dbhash.cpp
@@ -46,210 +46,197 @@
namespace mongo {
- using std::endl;
- using std::list;
- using std::set;
- using std::string;
- using std::unique_ptr;
- using std::vector;
+using std::endl;
+using std::list;
+using std::set;
+using std::string;
+using std::unique_ptr;
+using std::vector;
- DBHashCmd dbhashCmd;
+DBHashCmd dbhashCmd;
- void logOpForDbHash(OperationContext* txn, const char* ns) {
- dbhashCmd.wipeCacheForCollection(txn, ns);
- }
+void logOpForDbHash(OperationContext* txn, const char* ns) {
+ dbhashCmd.wipeCacheForCollection(txn, ns);
+}
- // ----
+// ----
- DBHashCmd::DBHashCmd() : Command("dbHash", false, "dbhash") {
- }
-
- void DBHashCmd::addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::dbHash);
- out->push_back(Privilege(ResourcePattern::forDatabaseName(dbname), actions));
- }
+DBHashCmd::DBHashCmd() : Command("dbHash", false, "dbhash") {}
- std::string DBHashCmd::hashCollection(OperationContext* opCtx,
- Database* db,
- const std::string& fullCollectionName,
- bool* fromCache) {
- stdx::unique_lock<stdx::mutex> cachedHashedLock(_cachedHashedMutex, stdx::defer_lock);
-
- if ( isCachable( fullCollectionName ) ) {
- cachedHashedLock.lock();
- string hash = _cachedHashed[fullCollectionName];
- if ( hash.size() > 0 ) {
- *fromCache = true;
- return hash;
- }
- }
+void DBHashCmd::addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::dbHash);
+ out->push_back(Privilege(ResourcePattern::forDatabaseName(dbname), actions));
+}
- *fromCache = false;
- Collection* collection = db->getCollection( fullCollectionName );
- if ( !collection )
- return "";
-
- IndexDescriptor* desc = collection->getIndexCatalog()->findIdIndex( opCtx );
-
- unique_ptr<PlanExecutor> exec;
- if ( desc ) {
- exec.reset(InternalPlanner::indexScan(opCtx,
- collection,
- desc,
- BSONObj(),
- BSONObj(),
- false,
- InternalPlanner::FORWARD,
- InternalPlanner::IXSCAN_FETCH));
- }
- else if ( collection->isCapped() ) {
- exec.reset(InternalPlanner::collectionScan(opCtx,
- fullCollectionName,
- collection));
- }
- else {
- log() << "can't find _id index for: " << fullCollectionName << endl;
- return "no _id _index";
+std::string DBHashCmd::hashCollection(OperationContext* opCtx,
+ Database* db,
+ const std::string& fullCollectionName,
+ bool* fromCache) {
+ stdx::unique_lock<stdx::mutex> cachedHashedLock(_cachedHashedMutex, stdx::defer_lock);
+
+ if (isCachable(fullCollectionName)) {
+ cachedHashedLock.lock();
+ string hash = _cachedHashed[fullCollectionName];
+ if (hash.size() > 0) {
+ *fromCache = true;
+ return hash;
}
+ }
- md5_state_t st;
- md5_init(&st);
+ *fromCache = false;
+ Collection* collection = db->getCollection(fullCollectionName);
+ if (!collection)
+ return "";
+
+ IndexDescriptor* desc = collection->getIndexCatalog()->findIdIndex(opCtx);
+
+ unique_ptr<PlanExecutor> exec;
+ if (desc) {
+ exec.reset(InternalPlanner::indexScan(opCtx,
+ collection,
+ desc,
+ BSONObj(),
+ BSONObj(),
+ false,
+ InternalPlanner::FORWARD,
+ InternalPlanner::IXSCAN_FETCH));
+ } else if (collection->isCapped()) {
+ exec.reset(InternalPlanner::collectionScan(opCtx, fullCollectionName, collection));
+ } else {
+ log() << "can't find _id index for: " << fullCollectionName << endl;
+ return "no _id _index";
+ }
- long long n = 0;
- PlanExecutor::ExecState state;
- BSONObj c;
- verify(NULL != exec.get());
- while (PlanExecutor::ADVANCED == (state = exec->getNext(&c, NULL))) {
- md5_append( &st , (const md5_byte_t*)c.objdata() , c.objsize() );
- n++;
- }
- if (PlanExecutor::IS_EOF != state) {
- warning() << "error while hashing, db dropped? ns=" << fullCollectionName << endl;
- }
- md5digest d;
- md5_finish(&st, d);
- string hash = digestToString( d );
+ md5_state_t st;
+ md5_init(&st);
- if (cachedHashedLock.owns_lock()) {
- _cachedHashed[fullCollectionName] = hash;
- }
+ long long n = 0;
+ PlanExecutor::ExecState state;
+ BSONObj c;
+ verify(NULL != exec.get());
+ while (PlanExecutor::ADVANCED == (state = exec->getNext(&c, NULL))) {
+ md5_append(&st, (const md5_byte_t*)c.objdata(), c.objsize());
+ n++;
+ }
+ if (PlanExecutor::IS_EOF != state) {
+ warning() << "error while hashing, db dropped? ns=" << fullCollectionName << endl;
+ }
+ md5digest d;
+ md5_finish(&st, d);
+ string hash = digestToString(d);
- return hash;
+ if (cachedHashedLock.owns_lock()) {
+ _cachedHashed[fullCollectionName] = hash;
}
- bool DBHashCmd::run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- Timer timer;
-
- set<string> desiredCollections;
- if ( cmdObj["collections"].type() == Array ) {
- BSONObjIterator i( cmdObj["collections"].Obj() );
- while ( i.more() ) {
- BSONElement e = i.next();
- if ( e.type() != String ) {
- errmsg = "collections entries have to be strings";
- return false;
- }
- desiredCollections.insert( e.String() );
- }
- }
+ return hash;
+}
- list<string> colls;
- const string ns = parseNs(dbname, cmdObj);
-
- // We lock the entire database in S-mode in order to ensure that the contents will not
- // change for the snapshot.
- ScopedTransaction scopedXact(txn, MODE_IS);
- AutoGetDb autoDb(txn, ns, MODE_S);
- Database* db = autoDb.getDb();
- if (db) {
- db->getDatabaseCatalogEntry()->getCollectionNamespaces(&colls);
- colls.sort();
+bool DBHashCmd::run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ Timer timer;
+
+ set<string> desiredCollections;
+ if (cmdObj["collections"].type() == Array) {
+ BSONObjIterator i(cmdObj["collections"].Obj());
+ while (i.more()) {
+ BSONElement e = i.next();
+ if (e.type() != String) {
+ errmsg = "collections entries have to be strings";
+ return false;
+ }
+ desiredCollections.insert(e.String());
}
+ }
- result.appendNumber( "numCollections" , (long long)colls.size() );
- result.append( "host" , prettyHostName() );
+ list<string> colls;
+ const string ns = parseNs(dbname, cmdObj);
+
+ // We lock the entire database in S-mode in order to ensure that the contents will not
+ // change for the snapshot.
+ ScopedTransaction scopedXact(txn, MODE_IS);
+ AutoGetDb autoDb(txn, ns, MODE_S);
+ Database* db = autoDb.getDb();
+ if (db) {
+ db->getDatabaseCatalogEntry()->getCollectionNamespaces(&colls);
+ colls.sort();
+ }
- md5_state_t globalState;
- md5_init(&globalState);
+ result.appendNumber("numCollections", (long long)colls.size());
+ result.append("host", prettyHostName());
- vector<string> cached;
+ md5_state_t globalState;
+ md5_init(&globalState);
- BSONObjBuilder bb( result.subobjStart( "collections" ) );
- for ( list<string>::iterator i=colls.begin(); i != colls.end(); i++ ) {
- string fullCollectionName = *i;
- if ( fullCollectionName.size() -1 <= dbname.size() ) {
- errmsg = str::stream() << "weird fullCollectionName [" << fullCollectionName << "]";
- return false;
- }
- string shortCollectionName = fullCollectionName.substr( dbname.size() + 1 );
+ vector<string> cached;
- if ( shortCollectionName.find( "system." ) == 0 )
- continue;
+ BSONObjBuilder bb(result.subobjStart("collections"));
+ for (list<string>::iterator i = colls.begin(); i != colls.end(); i++) {
+ string fullCollectionName = *i;
+ if (fullCollectionName.size() - 1 <= dbname.size()) {
+ errmsg = str::stream() << "weird fullCollectionName [" << fullCollectionName << "]";
+ return false;
+ }
+ string shortCollectionName = fullCollectionName.substr(dbname.size() + 1);
- if ( desiredCollections.size() > 0 &&
- desiredCollections.count( shortCollectionName ) == 0 )
- continue;
+ if (shortCollectionName.find("system.") == 0)
+ continue;
- bool fromCache = false;
- string hash = hashCollection( txn, db, fullCollectionName, &fromCache );
+ if (desiredCollections.size() > 0 && desiredCollections.count(shortCollectionName) == 0)
+ continue;
- bb.append( shortCollectionName, hash );
+ bool fromCache = false;
+ string hash = hashCollection(txn, db, fullCollectionName, &fromCache);
- md5_append( &globalState , (const md5_byte_t*)hash.c_str() , hash.size() );
- if ( fromCache )
- cached.push_back( fullCollectionName );
- }
- bb.done();
+ bb.append(shortCollectionName, hash);
- md5digest d;
- md5_finish(&globalState, d);
- string hash = digestToString( d );
+ md5_append(&globalState, (const md5_byte_t*)hash.c_str(), hash.size());
+ if (fromCache)
+ cached.push_back(fullCollectionName);
+ }
+ bb.done();
- result.append( "md5" , hash );
- result.appendNumber( "timeMillis", timer.millis() );
+ md5digest d;
+ md5_finish(&globalState, d);
+ string hash = digestToString(d);
- result.append( "fromCache", cached );
+ result.append("md5", hash);
+ result.appendNumber("timeMillis", timer.millis());
- return 1;
- }
+ result.append("fromCache", cached);
- class DBHashCmd::DBHashLogOpHandler : public RecoveryUnit::Change {
- public:
- DBHashLogOpHandler(DBHashCmd* dCmd,
- StringData ns):
- _dCmd(dCmd),
- _ns(ns.toString()) {
+ return 1;
+}
- }
- void commit() {
- stdx::lock_guard<stdx::mutex> lk( _dCmd->_cachedHashedMutex );
- _dCmd->_cachedHashed.erase(_ns);
- }
- void rollback() { }
-
- private:
- DBHashCmd *_dCmd;
- const std::string _ns;
- };
-
- void DBHashCmd::wipeCacheForCollection(OperationContext* txn,
- StringData ns) {
- if ( !isCachable( ns ) )
- return;
- txn->recoveryUnit()->registerChange(new DBHashLogOpHandler(this, ns));
+class DBHashCmd::DBHashLogOpHandler : public RecoveryUnit::Change {
+public:
+ DBHashLogOpHandler(DBHashCmd* dCmd, StringData ns) : _dCmd(dCmd), _ns(ns.toString()) {}
+ void commit() {
+ stdx::lock_guard<stdx::mutex> lk(_dCmd->_cachedHashedMutex);
+ _dCmd->_cachedHashed.erase(_ns);
}
+ void rollback() {}
- bool DBHashCmd::isCachable( StringData ns ) const {
- return ns.startsWith( "config." );
- }
+private:
+ DBHashCmd* _dCmd;
+ const std::string _ns;
+};
+
+void DBHashCmd::wipeCacheForCollection(OperationContext* txn, StringData ns) {
+ if (!isCachable(ns))
+ return;
+ txn->recoveryUnit()->registerChange(new DBHashLogOpHandler(this, ns));
+}
+bool DBHashCmd::isCachable(StringData ns) const {
+ return ns.startsWith("config.");
+}
}
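
A minimal invocation sketch (legacy C++ driver; collection names hypothetical) of the command shape DBHashCmd::run() above accepts. "collections" is the optional string-array filter; system.* collections are skipped either way:

    #include <string>

    #include "mongo/client/dbclientinterface.h"

    using namespace mongo;

    std::string databaseDigest(DBClientConnection& conn) {
        BSONObj res;
        conn.runCommand("test",
                        BSON("dbHash" << 1
                             << "collections" << BSON_ARRAY("foo" << "bar")),  // hypothetical
                        res);
        // Per-collection hashes sit under res["collections"]; "fromCache"
        // lists the ones served from the config.* cache maintained above.
        return res["md5"].String();  // aggregate md5 over the per-collection hashes
    }
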
diff --git a/src/mongo/db/commands/dbhash.h b/src/mongo/db/commands/dbhash.h
index aa9a396b080..8b566f98327 100644
--- a/src/mongo/db/commands/dbhash.h
+++ b/src/mongo/db/commands/dbhash.h
@@ -35,41 +35,45 @@
namespace mongo {
- void logOpForDbHash( OperationContext* txn, const char* ns );
-
- class DBHashCmd : public Command {
- public:
- DBHashCmd();
-
- virtual bool slaveOk() const { return true; }
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out);
-
- virtual bool run(OperationContext* txn,
- const std::string& dbname,
- BSONObj& cmdObj,
- int,
- std::string& errmsg,
- BSONObjBuilder& result);
-
- void wipeCacheForCollection(OperationContext* txn, StringData ns);
-
- private:
-
- /**
- * RecoveryUnit::Change subclass used to commit work for dbhash logOp listener
- */
- class DBHashLogOpHandler;
-
- bool isCachable( StringData ns ) const;
-
- std::string hashCollection( OperationContext* opCtx, Database* db, const std::string& fullCollectionName, bool* fromCache );
-
- std::map<std::string,std::string> _cachedHashed;
- stdx::mutex _cachedHashedMutex;
-
- };
-
+void logOpForDbHash(OperationContext* txn, const char* ns);
+
+class DBHashCmd : public Command {
+public:
+ DBHashCmd();
+
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out);
+
+ virtual bool run(OperationContext* txn,
+ const std::string& dbname,
+ BSONObj& cmdObj,
+ int,
+ std::string& errmsg,
+ BSONObjBuilder& result);
+
+ void wipeCacheForCollection(OperationContext* txn, StringData ns);
+
+private:
+ /**
+ * RecoveryUnit::Change subclass used to commit work for dbhash logOp listener
+ */
+ class DBHashLogOpHandler;
+
+ bool isCachable(StringData ns) const;
+
+ std::string hashCollection(OperationContext* opCtx,
+ Database* db,
+ const std::string& fullCollectionName,
+ bool* fromCache);
+
+ std::map<std::string, std::string> _cachedHashed;
+ stdx::mutex _cachedHashedMutex;
+};
}
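
The private DBHashLogOpHandler declared above is what keeps _cachedHashed correct under rollback: invalidation is registered on the RecoveryUnit and only runs if the write commits. A minimal standalone sketch of that pattern (class name hypothetical; the real handler takes the owning command's mutex before erasing the entry):

    #include <string>
    #include <utility>

    #include "mongo/db/storage/recovery_unit.h"

    namespace {

    // Deferred cache invalidation: commit() fires only when the unit of work
    // commits, so a rolled-back write never evicts a still-valid hash.
    class EraseHashOnCommit : public mongo::RecoveryUnit::Change {
    public:
        explicit EraseHashOnCommit(std::string ns) : _ns(std::move(ns)) {}
        void commit() {
            // erase _ns from the owning command's cache (under its mutex)
        }
        void rollback() {}  // nothing to undo; the cache was never touched

    private:
        const std::string _ns;
    };

    }  // namespace

    // Registered from a write path, e.g.:
    //   txn->recoveryUnit()->registerChange(new EraseHashOnCommit(ns.toString()));
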
diff --git a/src/mongo/db/commands/distinct.cpp b/src/mongo/db/commands/distinct.cpp
index ceae947fc52..2c281140c63 100644
--- a/src/mongo/db/commands/distinct.cpp
+++ b/src/mongo/db/commands/distinct.cpp
@@ -47,137 +47,142 @@
namespace mongo {
- using std::unique_ptr;
- using std::string;
- using std::stringstream;
-
- class DistinctCommand : public Command {
- public:
- DistinctCommand() : Command("distinct") {}
-
- virtual bool slaveOk() const { return false; }
- virtual bool slaveOverrideOk() const { return true; }
- virtual bool isWriteCommandForConfigServer() const { return false; }
-
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::find);
- out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
+using std::unique_ptr;
+using std::string;
+using std::stringstream;
+
+class DistinctCommand : public Command {
+public:
+ DistinctCommand() : Command("distinct") {}
+
+ virtual bool slaveOk() const {
+ return false;
+ }
+ virtual bool slaveOverrideOk() const {
+ return true;
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::find);
+ out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
+ }
+
+ virtual void help(stringstream& help) const {
+ help << "{ distinct : 'collection name' , key : 'a.b' , query : {} }";
+ }
+
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ Timer t;
+
+ // ensure that the key is a string
+ uassert(18510,
+ mongoutils::str::stream() << "The first argument to the distinct command "
+ << "must be a string but was a "
+ << typeName(cmdObj["key"].type()),
+ cmdObj["key"].type() == mongo::String);
+
+ // ensure that the where clause is a document
+ if (cmdObj["query"].isNull() == false && cmdObj["query"].eoo() == false) {
+ uassert(18511,
+ mongoutils::str::stream() << "The query for the distinct command must be a "
+ << "document but was a "
+ << typeName(cmdObj["query"].type()),
+ cmdObj["query"].type() == mongo::Object);
}
- virtual void help( stringstream &help ) const {
- help << "{ distinct : 'collection name' , key : 'a.b' , query : {} }";
- }
+ string key = cmdObj["key"].valuestrsafe();
+ BSONObj keyPattern = BSON(key << 1);
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
-
- Timer t;
-
- // ensure that the key is a string
- uassert(18510,
- mongoutils::str::stream() << "The first argument to the distinct command " <<
- "must be a string but was a " << typeName(cmdObj["key"].type()),
- cmdObj["key"].type() == mongo::String);
-
- // ensure that the where clause is a document
- if( cmdObj["query"].isNull() == false && cmdObj["query"].eoo() == false ){
- uassert(18511,
- mongoutils::str::stream() << "The query for the distinct command must be a " <<
- "document but was a " << typeName(cmdObj["query"].type()),
- cmdObj["query"].type() == mongo::Object);
- }
+ BSONObj query = getQuery(cmdObj);
- string key = cmdObj["key"].valuestrsafe();
- BSONObj keyPattern = BSON( key << 1 );
+ int bufSize = BSONObjMaxUserSize - 4096;
+ BufBuilder bb(bufSize);
+ char* start = bb.buf();
- BSONObj query = getQuery( cmdObj );
+ BSONArrayBuilder arr(bb);
+ BSONElementSet values;
- int bufSize = BSONObjMaxUserSize - 4096;
- BufBuilder bb( bufSize );
- char * start = bb.buf();
+ const string ns = parseNs(dbname, cmdObj);
+ AutoGetCollectionForRead ctx(txn, ns);
- BSONArrayBuilder arr( bb );
- BSONElementSet values;
+ Collection* collection = ctx.getCollection();
+ if (!collection) {
+ result.appendArray("values", BSONObj());
+ result.append("stats", BSON("n" << 0 << "nscanned" << 0 << "nscannedObjects" << 0));
+ return true;
+ }
- const string ns = parseNs(dbname, cmdObj);
- AutoGetCollectionForRead ctx(txn, ns);
+ PlanExecutor* rawExec;
+ Status status =
+ getExecutorDistinct(txn, collection, query, key, PlanExecutor::YIELD_AUTO, &rawExec);
+ if (!status.isOK()) {
+ uasserted(17216,
+ mongoutils::str::stream() << "Can't get executor for query " << query << ": "
+ << status.toString());
+ return 0;
+ }
- Collection* collection = ctx.getCollection();
- if (!collection) {
- result.appendArray( "values" , BSONObj() );
- result.append("stats", BSON("n" << 0 <<
- "nscanned" << 0 <<
- "nscannedObjects" << 0));
- return true;
- }
+ unique_ptr<PlanExecutor> exec(rawExec);
+
+ BSONObj obj;
+ PlanExecutor::ExecState state;
+ while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
+ // Distinct expands arrays.
+ //
+ // If our query is covered, each value of the key should be in the index key and
+ // available to us without this. If a collection scan is providing the data, we may
+ // have to expand an array.
+ BSONElementSet elts;
+ obj.getFieldsDotted(key, elts);
+
+ for (BSONElementSet::iterator it = elts.begin(); it != elts.end(); ++it) {
+ BSONElement elt = *it;
+ if (values.count(elt)) {
+ continue;
+ }
+ int currentBufPos = bb.len();
- PlanExecutor* rawExec;
- Status status = getExecutorDistinct(txn,
- collection,
- query,
- key,
- PlanExecutor::YIELD_AUTO,
- &rawExec);
- if (!status.isOK()) {
- uasserted(17216, mongoutils::str::stream() << "Can't get executor for query "
- << query << ": " << status.toString());
- return 0;
- }
+ uassert(17217,
+ "distinct too big, 16mb cap",
+ (currentBufPos + elt.size() + 1024) < bufSize);
- unique_ptr<PlanExecutor> exec(rawExec);
-
- BSONObj obj;
- PlanExecutor::ExecState state;
- while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
- // Distinct expands arrays.
- //
- // If our query is covered, each value of the key should be in the index key and
- // available to us without this. If a collection scan is providing the data, we may
- // have to expand an array.
- BSONElementSet elts;
- obj.getFieldsDotted(key, elts);
-
- for (BSONElementSet::iterator it = elts.begin(); it != elts.end(); ++it) {
- BSONElement elt = *it;
- if (values.count(elt)) { continue; }
- int currentBufPos = bb.len();
-
- uassert(17217, "distinct too big, 16mb cap",
- (currentBufPos + elt.size() + 1024) < bufSize);
-
- arr.append(elt);
- BSONElement x(start + currentBufPos);
- values.insert(x);
- }
+ arr.append(elt);
+ BSONElement x(start + currentBufPos);
+ values.insert(x);
}
+ }
- // Get summary information about the plan.
- PlanSummaryStats stats;
- Explain::getSummaryStats(exec.get(), &stats);
-
- verify( start == bb.buf() );
+ // Get summary information about the plan.
+ PlanSummaryStats stats;
+ Explain::getSummaryStats(exec.get(), &stats);
- result.appendArray( "values" , arr.done() );
+ verify(start == bb.buf());
- {
- BSONObjBuilder b;
- b.appendNumber( "n" , stats.nReturned );
- b.appendNumber( "nscanned" , stats.totalKeysExamined );
- b.appendNumber( "nscannedObjects" , stats.totalDocsExamined );
- b.appendNumber( "timems" , t.millis() );
- b.append( "planSummary" , Explain::getPlanSummary(exec.get()) );
- result.append( "stats" , b.obj() );
- }
+ result.appendArray("values", arr.done());
- return true;
+ {
+ BSONObjBuilder b;
+ b.appendNumber("n", stats.nReturned);
+ b.appendNumber("nscanned", stats.totalKeysExamined);
+ b.appendNumber("nscannedObjects", stats.totalDocsExamined);
+ b.appendNumber("timems", t.millis());
+ b.append("planSummary", Explain::getPlanSummary(exec.get()));
+ result.append("stats", b.obj());
}
- } distinctCmd;
+
+ return true;
+ }
+} distinctCmd;
} // namespace mongo
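
Aside: the size guard in the distinct loop above reserves 4KB of the 16MB BSON limit up front (bufSize = BSONObjMaxUserSize - 4096) and re-checks with 1KB of slack per element before appending. A minimal standalone sketch of that pattern, using std::set and std::string as stand-ins for BSONElementSet and BufBuilder (the names below are illustrative, not MongoDB APIs):

#include <cstddef>
#include <iostream>
#include <set>
#include <stdexcept>
#include <string>
#include <vector>

// Accumulate distinct values while enforcing a response-size cap with a
// fixed safety margin, mirroring the constants used by distinct.cpp.
std::vector<std::string> distinctWithCap(const std::vector<std::string>& input) {
    const std::size_t kMaxUserSize = 16 * 1024 * 1024;  // BSONObjMaxUserSize
    const std::size_t kBudget = kMaxUserSize - 4096;    // reserve header room up front
    std::set<std::string> seen;
    std::vector<std::string> out;
    std::size_t used = 0;
    for (const auto& v : input) {
        if (seen.count(v))
            continue;  // skip values already emitted, as the BSONElementSet does
        if (used + v.size() + 1024 >= kBudget)  // 1KB per-element slack
            throw std::runtime_error("distinct too big, 16mb cap");
        used += v.size();
        seen.insert(v);
        out.push_back(v);
    }
    return out;
}

int main() {
    for (const auto& v : distinctWithCap({"a", "b", "a", "c"}))
        std::cout << v << '\n';  // prints a, b, c
}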
diff --git a/src/mongo/db/commands/drop_indexes.cpp b/src/mongo/db/commands/drop_indexes.cpp
index 52dfba38bf2..d0e6ac633a5 100644
--- a/src/mongo/db/commands/drop_indexes.cpp
+++ b/src/mongo/db/commands/drop_indexes.cpp
@@ -59,142 +59,142 @@
namespace mongo {
- using std::endl;
- using std::string;
- using std::stringstream;
- using std::vector;
-
- /* "dropIndexes" is now the preferred form - "deleteIndexes" deprecated */
- class CmdDropIndexes : public Command {
- public:
- virtual bool slaveOk() const {
+using std::endl;
+using std::string;
+using std::stringstream;
+using std::vector;
+
+/* "dropIndexes" is now the preferred form - "deleteIndexes" deprecated */
+class CmdDropIndexes : public Command {
+public:
+ virtual bool slaveOk() const {
+ return false;
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return true;
+ }
+ virtual void help(stringstream& help) const {
+ help << "drop indexes for a collection";
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::dropIndex);
+ out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
+ }
+
+ CmdDropIndexes() : Command("dropIndexes", false, "deleteIndexes") {}
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& jsobj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ const std::string ns = parseNsCollectionRequired(dbname, jsobj);
+ return appendCommandStatus(result, dropIndexes(txn, NamespaceString(ns), jsobj, &result));
+ }
+
+} cmdDropIndexes;
+
+class CmdReIndex : public Command {
+public:
+ virtual bool slaveOk() const {
+ return true;
+ } // can reindex on a secondary
+ virtual bool isWriteCommandForConfigServer() const {
+ return true;
+ }
+ virtual void help(stringstream& help) const {
+ help << "re-index a collection";
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::reIndex);
+ out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
+ }
+ CmdReIndex() : Command("reIndex") {}
+
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& jsobj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ DBDirectClient db(txn);
+
+ const std::string toDeleteNs = parseNsCollectionRequired(dbname, jsobj);
+
+ LOG(0) << "CMD: reIndex " << toDeleteNs << endl;
+
+ ScopedTransaction transaction(txn, MODE_IX);
+ Lock::DBLock dbXLock(txn->lockState(), dbname, MODE_X);
+ OldClientContext ctx(txn, toDeleteNs);
+
+ Collection* collection = ctx.db()->getCollection(toDeleteNs);
+
+ if (!collection) {
+ errmsg = "ns not found";
return false;
}
- virtual bool isWriteCommandForConfigServer() const { return true; }
- virtual void help( stringstream& help ) const {
- help << "drop indexes for a collection";
- }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::dropIndex);
- out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
- }
-
- CmdDropIndexes() : Command("dropIndexes", false, "deleteIndexes") { }
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& jsobj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- const std::string ns = parseNsCollectionRequired(dbname, jsobj);
- return appendCommandStatus(result,
- dropIndexes(txn,
- NamespaceString(ns),
- jsobj,
- &result));
- }
-
- } cmdDropIndexes;
-
- class CmdReIndex : public Command {
- public:
- virtual bool slaveOk() const { return true; } // can reindex on a secondary
- virtual bool isWriteCommandForConfigServer() const { return true; }
- virtual void help( stringstream& help ) const {
- help << "re-index a collection";
- }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::reIndex);
- out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
- }
- CmdReIndex() : Command("reIndex") { }
-
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& jsobj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- DBDirectClient db(txn);
-
- const std::string toDeleteNs = parseNsCollectionRequired(dbname, jsobj);
-
- LOG(0) << "CMD: reIndex " << toDeleteNs << endl;
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock dbXLock(txn->lockState(), dbname, MODE_X);
- OldClientContext ctx(txn, toDeleteNs);
-
- Collection* collection = ctx.db()->getCollection( toDeleteNs );
-
- if ( !collection ) {
- errmsg = "ns not found";
- return false;
- }
-
- BackgroundOperation::assertNoBgOpInProgForNs( toDeleteNs );
-
- vector<BSONObj> all;
- {
- vector<string> indexNames;
- collection->getCatalogEntry()->getAllIndexes( txn, &indexNames );
- for ( size_t i = 0; i < indexNames.size(); i++ ) {
- const string& name = indexNames[i];
- BSONObj spec = collection->getCatalogEntry()->getIndexSpec( txn, name );
- all.push_back(spec.removeField("v").getOwned());
-
- const BSONObj key = spec.getObjectField("key");
- const Status keyStatus = validateKeyPattern(key);
- if (!keyStatus.isOK()) {
- errmsg = str::stream()
- << "Cannot rebuild index " << spec << ": " << keyStatus.reason()
- << " For more info see http://dochub.mongodb.org/core/index-validation";
- return false;
- }
+ BackgroundOperation::assertNoBgOpInProgForNs(toDeleteNs);
+
+ vector<BSONObj> all;
+ {
+ vector<string> indexNames;
+ collection->getCatalogEntry()->getAllIndexes(txn, &indexNames);
+ for (size_t i = 0; i < indexNames.size(); i++) {
+ const string& name = indexNames[i];
+ BSONObj spec = collection->getCatalogEntry()->getIndexSpec(txn, name);
+ all.push_back(spec.removeField("v").getOwned());
+
+ const BSONObj key = spec.getObjectField("key");
+ const Status keyStatus = validateKeyPattern(key);
+ if (!keyStatus.isOK()) {
+ errmsg = str::stream()
+ << "Cannot rebuild index " << spec << ": " << keyStatus.reason()
+ << " For more info see http://dochub.mongodb.org/core/index-validation";
+ return false;
}
}
+ }
- result.appendNumber( "nIndexesWas", all.size() );
+ result.appendNumber("nIndexesWas", all.size());
- {
- WriteUnitOfWork wunit(txn);
- Status s = collection->getIndexCatalog()->dropAllIndexes(txn, true);
- if ( !s.isOK() ) {
- errmsg = "dropIndexes failed";
- return appendCommandStatus( result, s );
- }
- wunit.commit();
+ {
+ WriteUnitOfWork wunit(txn);
+ Status s = collection->getIndexCatalog()->dropAllIndexes(txn, true);
+ if (!s.isOK()) {
+ errmsg = "dropIndexes failed";
+ return appendCommandStatus(result, s);
}
+ wunit.commit();
+ }
- MultiIndexBlock indexer(txn, collection);
-            // We do not want interruption, as that would leave the collection without indexes.
-
- Status status = indexer.init(all);
- if (!status.isOK())
- return appendCommandStatus( result, status );
-
- status = indexer.insertAllDocumentsInCollection();
- if (!status.isOK())
- return appendCommandStatus( result, status );
+ MultiIndexBlock indexer(txn, collection);
+    // We do not want interruption, as that would leave the collection without indexes.
- {
- WriteUnitOfWork wunit(txn);
- indexer.commit();
- wunit.commit();
- }
+ Status status = indexer.init(all);
+ if (!status.isOK())
+ return appendCommandStatus(result, status);
- result.append( "nIndexes", (int)all.size() );
- result.append( "indexes", all );
+ status = indexer.insertAllDocumentsInCollection();
+ if (!status.isOK())
+ return appendCommandStatus(result, status);
- return true;
+ {
+ WriteUnitOfWork wunit(txn);
+ indexer.commit();
+ wunit.commit();
}
- } cmdReIndex;
+ result.append("nIndexes", (int)all.size());
+ result.append("indexes", all);
+ return true;
+ }
+} cmdReIndex;
}
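
Aside: CmdReIndex above is careful about ordering: it validates and snapshots every index spec before dropAllIndexes(), so a bad key pattern fails the command while the indexes are still intact, and only then rebuilds from the snapshot. A minimal sketch of that ordering against a toy in-memory catalog (ToyCatalog and the string specs are illustrative stand-ins, not MongoDB types):

#include <cstddef>
#include <iostream>
#include <stdexcept>
#include <string>
#include <vector>

struct ToyCatalog {
    std::vector<std::string> indexSpecs;
};

std::size_t reindex(ToyCatalog& catalog) {
    // 1. Snapshot and validate the specs before dropping anything, so a bad
    //    spec aborts the command with the indexes untouched.
    std::vector<std::string> all = catalog.indexSpecs;
    for (const std::string& spec : all) {
        if (spec.empty())
            throw std::runtime_error("Cannot rebuild index: invalid spec");
    }
    const std::size_t nIndexesWas = all.size();
    catalog.indexSpecs.clear();  // 2. dropAllIndexes
    catalog.indexSpecs = all;    // 3. rebuild everything from the snapshot
    return nIndexesWas;
}

int main() {
    ToyCatalog catalog{{"{_id: 1}", "{a: 1}"}};
    std::cout << "nIndexesWas: " << reindex(catalog) << '\n';  // nIndexesWas: 2
}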
diff --git a/src/mongo/db/commands/explain_cmd.cpp b/src/mongo/db/commands/explain_cmd.cpp
index 9560a41f9fc..9a3abcc6176 100644
--- a/src/mongo/db/commands/explain_cmd.cpp
+++ b/src/mongo/db/commands/explain_cmd.cpp
@@ -43,84 +43,82 @@
namespace mongo {
- using std::string;
+using std::string;
- static CmdExplain cmdExplain;
+static CmdExplain cmdExplain;
- Status CmdExplain::checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- if (Object != cmdObj.firstElement().type()) {
- return Status(ErrorCodes::BadValue, "explain command requires a nested object");
- }
+Status CmdExplain::checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ if (Object != cmdObj.firstElement().type()) {
+ return Status(ErrorCodes::BadValue, "explain command requires a nested object");
+ }
+
+ BSONObj explainObj = cmdObj.firstElement().Obj();
+
+ Command* commToExplain = Command::findCommand(explainObj.firstElementFieldName());
+ if (NULL == commToExplain) {
+ mongoutils::str::stream ss;
+ ss << "unknown command: " << explainObj.firstElementFieldName();
+ return Status(ErrorCodes::CommandNotFound, ss);
+ }
- BSONObj explainObj = cmdObj.firstElement().Obj();
+ return commToExplain->checkAuthForCommand(client, dbname, explainObj);
+}
+
+bool CmdExplain::run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ ExplainCommon::Verbosity verbosity;
+ Status parseStatus = ExplainCommon::parseCmdBSON(cmdObj, &verbosity);
+ if (!parseStatus.isOK()) {
+ return appendCommandStatus(result, parseStatus);
+ }
+
+ // This is the nested command which we are explaining.
+ BSONObj explainObj = cmdObj.firstElement().Obj();
- Command* commToExplain = Command::findCommand(explainObj.firstElementFieldName());
- if (NULL == commToExplain) {
- mongoutils::str::stream ss;
- ss << "unknown command: " << explainObj.firstElementFieldName();
- return Status(ErrorCodes::CommandNotFound, ss);
- }
+ Command* commToExplain = Command::findCommand(explainObj.firstElementFieldName());
+ if (NULL == commToExplain) {
+ mongoutils::str::stream ss;
+ ss << "Explain failed due to unknown command: " << explainObj.firstElementFieldName();
+ Status explainStatus(ErrorCodes::CommandNotFound, ss);
+ return appendCommandStatus(result, explainStatus);
+ }
- return commToExplain->checkAuthForCommand(client, dbname, explainObj);
+ // Check whether the child command is allowed to run here. TODO: this logic is
+ // copied from Command::execCommand and should be abstracted. Until then, make
+ // sure to keep it up to date.
+ repl::ReplicationCoordinator* replCoord = repl::getGlobalReplicationCoordinator();
+ bool iAmPrimary = replCoord->canAcceptWritesForDatabase(dbname);
+ bool commandCanRunOnSecondary = commToExplain->slaveOk();
+
+ bool commandIsOverriddenToRunOnSecondary = commToExplain->slaveOverrideOk() &&
+ (rpc::ServerSelectionMetadata::get(txn).isSecondaryOk() ||
+ rpc::ServerSelectionMetadata::get(txn).getReadPreference() != boost::none);
+ bool iAmStandalone = !txn->writesAreReplicated();
+
+ const bool canRunHere = iAmPrimary || commandCanRunOnSecondary ||
+ commandIsOverriddenToRunOnSecondary || iAmStandalone;
+
+ if (!canRunHere) {
+ mongoutils::str::stream ss;
+ ss << "Explain's child command cannot run on this node. "
+ << "Are you explaining a write command on a secondary?";
+ appendCommandStatus(result, false, ss);
+ return false;
}
- bool CmdExplain::run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj, int options,
- string& errmsg,
- BSONObjBuilder& result) {
-
- ExplainCommon::Verbosity verbosity;
- Status parseStatus = ExplainCommon::parseCmdBSON(cmdObj, &verbosity);
- if (!parseStatus.isOK()) {
- return appendCommandStatus(result, parseStatus);
- }
-
- // This is the nested command which we are explaining.
- BSONObj explainObj = cmdObj.firstElement().Obj();
-
- Command* commToExplain = Command::findCommand(explainObj.firstElementFieldName());
- if (NULL == commToExplain) {
- mongoutils::str::stream ss;
- ss << "Explain failed due to unknown command: " << explainObj.firstElementFieldName();
- Status explainStatus(ErrorCodes::CommandNotFound, ss);
- return appendCommandStatus(result, explainStatus);
- }
-
- // Check whether the child command is allowed to run here. TODO: this logic is
- // copied from Command::execCommand and should be abstracted. Until then, make
- // sure to keep it up to date.
- repl::ReplicationCoordinator* replCoord = repl::getGlobalReplicationCoordinator();
- bool iAmPrimary = replCoord->canAcceptWritesForDatabase(dbname);
- bool commandCanRunOnSecondary = commToExplain->slaveOk();
-
- bool commandIsOverriddenToRunOnSecondary = commToExplain->slaveOverrideOk() &&
- (rpc::ServerSelectionMetadata::get(txn).isSecondaryOk() ||
- rpc::ServerSelectionMetadata::get(txn).getReadPreference() != boost::none);
- bool iAmStandalone = !txn->writesAreReplicated();
-
- const bool canRunHere = iAmPrimary ||
- commandCanRunOnSecondary ||
- commandIsOverriddenToRunOnSecondary ||
- iAmStandalone;
-
- if (!canRunHere) {
- mongoutils::str::stream ss;
- ss << "Explain's child command cannot run on this node. "
- << "Are you explaining a write command on a secondary?";
- appendCommandStatus(result, false, ss);
- return false;
- }
-
- // Actually call the nested command's explain(...) method.
- Status explainStatus = commToExplain->explain(txn, dbname, explainObj, verbosity, &result);
- if (!explainStatus.isOK()) {
- return appendCommandStatus(result, explainStatus);
- }
-
- return true;
+ // Actually call the nested command's explain(...) method.
+ Status explainStatus = commToExplain->explain(txn, dbname, explainObj, verbosity, &result);
+ if (!explainStatus.isOK()) {
+ return appendCommandStatus(result, explainStatus);
}
-} // namespace mongo
+ return true;
+}
+
+} // namespace mongo
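
Aside: the canRunHere check above is a plain disjunction of four node and command properties. Restated as a standalone function, with the four inputs passed in as booleans (in the real command they come from the replication coordinator, the child command, and the operation context):

#include <iostream>

bool canRunHere(bool iAmPrimary,
                bool commandCanRunOnSecondary,
                bool commandIsOverriddenToRunOnSecondary,
                bool iAmStandalone) {
    return iAmPrimary || commandCanRunOnSecondary ||
        commandIsOverriddenToRunOnSecondary || iAmStandalone;
}

int main() {
    // A write command explained on a secondary without a slaveOk override:
    std::cout << canRunHere(false, false, false, false) << '\n';  // 0 (rejected)
    // The same explain on a standalone mongod:
    std::cout << canRunHere(false, false, false, true) << '\n';   // 1 (allowed)
}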
diff --git a/src/mongo/db/commands/explain_cmd.h b/src/mongo/db/commands/explain_cmd.h
index 24edec205ca..7d77bd3af36 100644
--- a/src/mongo/db/commands/explain_cmd.h
+++ b/src/mongo/db/commands/explain_cmd.h
@@ -33,57 +33,63 @@
namespace mongo {
- /**
- * The explain command is used to generate explain output for any read or write
- * operation which has a query component (e.g. find, count, update, remove, distinct, etc.).
- *
- * The explain command takes as its argument a nested object which specifies the command to
- * explain, and a verbosity indicator. For example:
- *
- * {explain: {count: "coll", query: {foo: "bar"}}, verbosity: "executionStats"}
- *
- * This command is like a dispatcher: it just retrieves a pointer to the nested command and
- * invokes its explain() implementation.
- */
- class CmdExplain : public Command {
- public:
- CmdExplain() : Command("explain") { }
-
- virtual bool isWriteCommandForConfigServer() const { return false; }
+/**
+ * The explain command is used to generate explain output for any read or write
+ * operation which has a query component (e.g. find, count, update, remove, distinct, etc.).
+ *
+ * The explain command takes as its argument a nested object which specifies the command to
+ * explain, and a verbosity indicator. For example:
+ *
+ * {explain: {count: "coll", query: {foo: "bar"}}, verbosity: "executionStats"}
+ *
+ * This command is like a dispatcher: it just retrieves a pointer to the nested command and
+ * invokes its explain() implementation.
+ */
+class CmdExplain : public Command {
+public:
+ CmdExplain() : Command("explain") {}
- /**
- * Running an explain on a secondary requires explicitly setting slaveOk.
- */
- virtual bool slaveOk() const {
- return false;
- }
- virtual bool slaveOverrideOk() const {
- return true;
- }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
- virtual bool maintenanceOk() const { return false; }
+ /**
+ * Running an explain on a secondary requires explicitly setting slaveOk.
+ */
+ virtual bool slaveOk() const {
+ return false;
+ }
+ virtual bool slaveOverrideOk() const {
+ return true;
+ }
- virtual bool adminOnly() const { return false; }
+ virtual bool maintenanceOk() const {
+ return false;
+ }
- virtual void help( std::stringstream& help ) const {
- help << "explain database reads and writes";
- }
+ virtual bool adminOnly() const {
+ return false;
+ }
- /**
- * You are authorized to run an explain if you are authorized to run
- * the command that you are explaining. The auth check is performed recursively
- * on the nested command.
- */
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj);
+ virtual void help(std::stringstream& help) const {
+ help << "explain database reads and writes";
+ }
- virtual bool run(OperationContext* txn,
- const std::string& dbname,
- BSONObj& cmdObj, int options,
- std::string& errmsg,
- BSONObjBuilder& result);
+ /**
+ * You are authorized to run an explain if you are authorized to run
+ * the command that you are explaining. The auth check is performed recursively
+ * on the nested command.
+ */
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj);
- };
+ virtual bool run(OperationContext* txn,
+ const std::string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result);
+};
} // namespace mongo
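
Aside: the recursive auth check this header describes amounts to a registry lookup by the first field name of the nested object, followed by delegation to the child command. A toy sketch of that dispatch, where Cmd and registry() stand in for the Command hierarchy and Command::findCommand (illustrative only, not the real interfaces):

#include <iostream>
#include <map>
#include <string>

struct Cmd {
    virtual ~Cmd() = default;
    virtual bool checkAuth() const = 0;  // the child command's own auth rule
};

struct CountCmd : Cmd {
    bool checkAuth() const override { return true; }
};

std::map<std::string, Cmd*>& registry() {
    static std::map<std::string, Cmd*> r;
    return r;
}

// You are authorized to run an explain iff you are authorized to run the
// command being explained; unknown commands are rejected outright.
bool checkAuthForExplain(const std::string& nestedCmdName) {
    auto it = registry().find(nestedCmdName);
    if (it == registry().end())
        return false;  // CommandNotFound in the real implementation
    return it->second->checkAuth();
}

int main() {
    CountCmd count;
    registry()["count"] = &count;
    std::cout << checkAuthForExplain("count") << '\n';  // 1
    std::cout << checkAuthForExplain("bogus") << '\n';  // 0
}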
diff --git a/src/mongo/db/commands/fail_point_cmd.cpp b/src/mongo/db/commands/fail_point_cmd.cpp
index 944d3ca491c..2b6e5cff0fc 100644
--- a/src/mongo/db/commands/fail_point_cmd.cpp
+++ b/src/mongo/db/commands/fail_point_cmd.cpp
@@ -37,140 +37,136 @@
namespace mongo {
- using std::string;
- using std::stringstream;
-
- /**
- * Command for modifying installed fail points.
- *
- * Format
- * {
- * configureFailPoint: <string>, // name of the fail point.
- * mode: <string|Object>, // the new mode to set. Can have one of the
- * following formats:
- *
- * 1. 'off' - disable fail point.
- * 2. 'alwaysOn' - fail point is always active.
- * 3. { activationProbability: <n> } - n should be a double between 0 and 1,
- * representing the probability that the fail point will fire. 0 means never,
- * 1 means (nearly) always.
- * 4. { times: <n> } - n should be positive and within the range of a 32 bit
- * signed integer; this is the number of passes for which the fail point
- * will remain activated.
- *
- * data: <Object> // optional arbitrary object to store.
- * }
- */
- class FaultInjectCmd: public Command {
- public:
- FaultInjectCmd(): Command("configureFailPoint") {}
-
- virtual bool slaveOk() const {
- return true;
- }
+using std::string;
+using std::stringstream;
- virtual bool isWriteCommandForConfigServer() const { return false; }
+/**
+ * Command for modifying installed fail points.
+ *
+ * Format
+ * {
+ * configureFailPoint: <string>, // name of the fail point.
+ * mode: <string|Object>, // the new mode to set. Can have one of the
+ * following formats:
+ *
+ * 1. 'off' - disable fail point.
+ * 2. 'alwaysOn' - fail point is always active.
+ * 3. { activationProbability: <n> } - n should be a double between 0 and 1,
+ * representing the probability that the fail point will fire. 0 means never,
+ * 1 means (nearly) always.
+ * 4. { times: <n> } - n should be positive and within the range of a 32 bit
+ * signed integer; this is the number of passes for which the fail point
+ * will remain activated.
+ *
+ * data: <Object> // optional arbitrary object to store.
+ * }
+ */
+class FaultInjectCmd : public Command {
+public:
+ FaultInjectCmd() : Command("configureFailPoint") {}
- virtual bool adminOnly() const {
- return true;
- }
+ virtual bool slaveOk() const {
+ return true;
+ }
- // No auth needed because it only works when enabled via command line.
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {}
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+
+ virtual bool adminOnly() const {
+ return true;
+ }
- virtual void help(stringstream& h) const {
- h << "modifies the settings of a fail point";
+ // No auth needed because it only works when enabled via command line.
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {}
+
+ virtual void help(stringstream& h) const {
+ h << "modifies the settings of a fail point";
+ }
+
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ const string failPointName(cmdObj.firstElement().str());
+ FailPointRegistry* registry = getGlobalFailPointRegistry();
+ FailPoint* failPoint = registry->getFailPoint(failPointName);
+
+ if (failPoint == NULL) {
+ errmsg = failPointName + " not found";
+ return false;
}
- bool run(OperationContext* txn, const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- const string failPointName(cmdObj.firstElement().str());
- FailPointRegistry* registry = getGlobalFailPointRegistry();
- FailPoint* failPoint = registry->getFailPoint(failPointName);
-
- if (failPoint == NULL) {
- errmsg = failPointName + " not found";
+ FailPoint::Mode mode = FailPoint::alwaysOn;
+ FailPoint::ValType val = 0;
+
+ const BSONElement modeElem(cmdObj["mode"]);
+ if (modeElem.eoo()) {
+ result.appendElements(failPoint->toBSON());
+ return true;
+ } else if (modeElem.type() == String) {
+ const string modeStr(modeElem.valuestr());
+
+ if (modeStr == "off") {
+ mode = FailPoint::off;
+ } else if (modeStr == "alwaysOn") {
+ mode = FailPoint::alwaysOn;
+ } else {
+ errmsg = "unknown mode: " + modeStr;
return false;
}
+ } else if (modeElem.type() == Object) {
+ const BSONObj modeObj(modeElem.Obj());
- FailPoint::Mode mode = FailPoint::alwaysOn;
- FailPoint::ValType val = 0;
+ if (modeObj.hasField("times")) {
+ mode = FailPoint::nTimes;
+ const int intVal = modeObj["times"].numberInt();
- const BSONElement modeElem(cmdObj["mode"]);
- if (modeElem.eoo()) {
- result.appendElements(failPoint->toBSON());
- return true;
- }
- else if (modeElem.type() == String) {
- const string modeStr(modeElem.valuestr());
-
- if (modeStr == "off") {
- mode = FailPoint::off;
- }
- else if (modeStr == "alwaysOn") {
- mode = FailPoint::alwaysOn;
- }
- else {
- errmsg = "unknown mode: " + modeStr;
+ if (intVal < 0) {
+ errmsg = "times should be positive";
return false;
}
- }
- else if (modeElem.type() == Object) {
- const BSONObj modeObj(modeElem.Obj());
-
- if (modeObj.hasField("times")) {
- mode = FailPoint::nTimes;
- const int intVal = modeObj["times"].numberInt();
- if (intVal < 0) {
- errmsg = "times should be positive";
- return false;
- }
-
- val = intVal;
- }
- else if (modeObj.hasField("activationProbability")) {
- mode = FailPoint::random;
- const double activationProbability =
- modeObj["activationProbability"].numberDouble();
- if (activationProbability < 0 || activationProbability > 1) {
- errmsg = str::stream() <<
- "activationProbability must be between 0.0 and 1.0; found " <<
- activationProbability;
- return false;
- }
- val = static_cast<int32_t>(
- std::numeric_limits<int32_t>::max() * activationProbability);
- }
- else {
- errmsg = "invalid mode object";
+ val = intVal;
+ } else if (modeObj.hasField("activationProbability")) {
+ mode = FailPoint::random;
+ const double activationProbability =
+ modeObj["activationProbability"].numberDouble();
+ if (activationProbability < 0 || activationProbability > 1) {
+ errmsg = str::stream()
+ << "activationProbability must be between 0.0 and 1.0; found "
+ << activationProbability;
return false;
}
- }
- else {
- errmsg = "invalid mode format";
+ val = static_cast<int32_t>(std::numeric_limits<int32_t>::max() *
+ activationProbability);
+ } else {
+ errmsg = "invalid mode object";
return false;
}
-
- BSONObj dataObj;
- if (cmdObj.hasField("data")) {
- dataObj = cmdObj["data"].Obj();
- }
-
- failPoint->setMode(mode, val, dataObj);
- return true;
+ } else {
+ errmsg = "invalid mode format";
+ return false;
}
- };
- MONGO_INITIALIZER(RegisterFaultInjectCmd)(InitializerContext* context) {
- if (Command::testCommandsEnabled) {
- // Leaked intentionally: a Command registers itself when constructed.
- new FaultInjectCmd();
+
+ BSONObj dataObj;
+ if (cmdObj.hasField("data")) {
+ dataObj = cmdObj["data"].Obj();
}
- return Status::OK();
+
+ failPoint->setMode(mode, val, dataObj);
+ return true;
}
+};
+MONGO_INITIALIZER(RegisterFaultInjectCmd)(InitializerContext* context) {
+ if (Command::testCommandsEnabled) {
+ // Leaked intentionally: a Command registers itself when constructed.
+ new FaultInjectCmd();
+ }
+ return Status::OK();
+}
}
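
Aside: for the activationProbability mode above, the command scales the probability once into an int32 threshold (max-int times p). One plausible way to evaluate that threshold per pass, sketched with a standard-library RNG (fire() and the RNG choice are assumptions; only the scaling arithmetic mirrors the command):

#include <cstdint>
#include <iostream>
#include <limits>
#include <random>

int32_t scaleProbability(double activationProbability) {
    // Matches the command: val = max-int32 * p.
    return static_cast<int32_t>(std::numeric_limits<int32_t>::max() *
                                activationProbability);
}

bool fire(int32_t threshold, std::mt19937& gen) {
    // Fire when a uniform random int32 falls below the scaled threshold.
    std::uniform_int_distribution<int32_t> draw(0, std::numeric_limits<int32_t>::max());
    return draw(gen) < threshold;
}

int main() {
    std::mt19937 gen(42);
    const int32_t threshold = scaleProbability(0.25);
    int fired = 0;
    for (int i = 0; i < 100000; ++i)
        fired += fire(threshold, gen);
    std::cout << fired / 100000.0 << '\n';  // close to 0.25
}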
diff --git a/src/mongo/db/commands/find_and_modify.cpp b/src/mongo/db/commands/find_and_modify.cpp
index 609b29673c2..ee7c2544ac3 100644
--- a/src/mongo/db/commands/find_and_modify.cpp
+++ b/src/mongo/db/commands/find_and_modify.cpp
@@ -67,423 +67,414 @@ namespace mongo {
namespace {
- const UpdateStats* getUpdateStats(const PlanStageStats* stats) {
- // The stats may refer to an update stage, or a projection stage wrapping an update stage.
- if (StageType::STAGE_PROJECTION == stats->stageType) {
- invariant(stats->children.size() == 1);
- stats = stats->children[0];
- }
+const UpdateStats* getUpdateStats(const PlanStageStats* stats) {
+ // The stats may refer to an update stage, or a projection stage wrapping an update stage.
+ if (StageType::STAGE_PROJECTION == stats->stageType) {
+ invariant(stats->children.size() == 1);
+ stats = stats->children[0];
+ }
- invariant(StageType::STAGE_UPDATE == stats->stageType);
- return static_cast<UpdateStats*>(stats->specific.get());
+ invariant(StageType::STAGE_UPDATE == stats->stageType);
+ return static_cast<UpdateStats*>(stats->specific.get());
+}
+
+const DeleteStats* getDeleteStats(const PlanStageStats* stats) {
+ // The stats may refer to a delete stage, or a projection stage wrapping a delete stage.
+ if (StageType::STAGE_PROJECTION == stats->stageType) {
+ invariant(stats->children.size() == 1);
+ stats = stats->children[0];
}
- const DeleteStats* getDeleteStats(const PlanStageStats* stats) {
- // The stats may refer to a delete stage, or a projection stage wrapping a delete stage.
- if (StageType::STAGE_PROJECTION == stats->stageType) {
- invariant(stats->children.size() == 1);
- stats = stats->children[0];
- }
+ invariant(StageType::STAGE_DELETE == stats->stageType);
+ return static_cast<DeleteStats*>(stats->specific.get());
+}
- invariant(StageType::STAGE_DELETE == stats->stageType);
- return static_cast<DeleteStats*>(stats->specific.get());
+/**
+ * If the operation succeeded, then Status::OK() is returned, possibly with a document value
+ * to return to the client. If no matching document to update or remove was found, then none
+ * is returned. Otherwise, the updated or deleted document is returned.
+ *
+ * If the operation failed, then an error Status is returned.
+ */
+StatusWith<boost::optional<BSONObj>> advanceExecutor(PlanExecutor* exec, bool isRemove) {
+ BSONObj value;
+ PlanExecutor::ExecState state = exec->getNext(&value, nullptr);
+ if (PlanExecutor::ADVANCED == state) {
+ return boost::optional<BSONObj>(std::move(value));
}
+ if (PlanExecutor::FAILURE == state || PlanExecutor::DEAD == state) {
+ const std::unique_ptr<PlanStageStats> stats(exec->getStats());
+ error() << "Plan executor error during findAndModify: " << PlanExecutor::statestr(state)
+ << ", stats: " << Explain::statsToBSON(*stats);
- /**
- * If the operation succeeded, then Status::OK() is returned, possibly with a document value
- * to return to the client. If no matching document to update or remove was found, then none
- * is returned. Otherwise, the updated or deleted document is returned.
- *
- * If the operation failed, then an error Status is returned.
- */
- StatusWith<boost::optional<BSONObj>> advanceExecutor(PlanExecutor* exec, bool isRemove) {
- BSONObj value;
- PlanExecutor::ExecState state = exec->getNext(&value, nullptr);
- if (PlanExecutor::ADVANCED == state) {
- return boost::optional<BSONObj>(std::move(value));
+ if (WorkingSetCommon::isValidStatusMemberObject(value)) {
+ const Status errorStatus = WorkingSetCommon::getMemberObjectStatus(value);
+ invariant(!errorStatus.isOK());
+ return {errorStatus.code(), errorStatus.reason()};
}
- if (PlanExecutor::FAILURE == state || PlanExecutor::DEAD == state) {
- const std::unique_ptr<PlanStageStats> stats(exec->getStats());
- error() << "Plan executor error during findAndModify: "
- << PlanExecutor::statestr(state)
- << ", stats: " << Explain::statsToBSON(*stats);
-
- if (WorkingSetCommon::isValidStatusMemberObject(value)) {
- const Status errorStatus =
- WorkingSetCommon::getMemberObjectStatus(value);
- invariant(!errorStatus.isOK());
- return {errorStatus.code(), errorStatus.reason()};
- }
- const std::string opstr = isRemove ? "delete" : "update";
- return {ErrorCodes::OperationFailed, str::stream()
- << "executor returned " << PlanExecutor::statestr(state)
- << " while executing " << opstr};
+ const std::string opstr = isRemove ? "delete" : "update";
+ return {ErrorCodes::OperationFailed,
+ str::stream() << "executor returned " << PlanExecutor::statestr(state)
+ << " while executing " << opstr};
+ }
+ invariant(state == PlanExecutor::IS_EOF);
+ return boost::optional<BSONObj>(boost::none);
+}
+
+void makeUpdateRequest(const FindAndModifyRequest& args,
+ bool explain,
+ UpdateLifecycleImpl* updateLifecycle,
+ UpdateRequest* requestOut) {
+ requestOut->setQuery(args.getQuery());
+ requestOut->setProj(args.getFields());
+ requestOut->setUpdates(args.getUpdateObj());
+ requestOut->setSort(args.getSort());
+ requestOut->setUpsert(args.isUpsert());
+ requestOut->setReturnDocs(args.shouldReturnNew() ? UpdateRequest::RETURN_NEW
+ : UpdateRequest::RETURN_OLD);
+ requestOut->setMulti(false);
+ requestOut->setYieldPolicy(PlanExecutor::YIELD_AUTO);
+ requestOut->setExplain(explain);
+ requestOut->setLifecycle(updateLifecycle);
+}
+
+void makeDeleteRequest(const FindAndModifyRequest& args, bool explain, DeleteRequest* requestOut) {
+ requestOut->setQuery(args.getQuery());
+ requestOut->setProj(args.getFields());
+ requestOut->setSort(args.getSort());
+ requestOut->setMulti(false);
+ requestOut->setYieldPolicy(PlanExecutor::YIELD_AUTO);
+ requestOut->setReturnDeleted(true); // Always return the old value.
+ requestOut->setExplain(explain);
+}
+
+void appendCommandResponse(PlanExecutor* exec,
+ bool isRemove,
+ const boost::optional<BSONObj>& value,
+ BSONObjBuilder& result) {
+ const std::unique_ptr<PlanStageStats> stats(exec->getStats());
+ BSONObjBuilder lastErrorObjBuilder(result.subobjStart("lastErrorObject"));
+
+ if (isRemove) {
+ lastErrorObjBuilder.appendNumber("n", getDeleteStats(stats.get())->docsDeleted);
+ } else {
+ const UpdateStats* updateStats = getUpdateStats(stats.get());
+ lastErrorObjBuilder.appendBool("updatedExisting", updateStats->nMatched > 0);
+ lastErrorObjBuilder.appendNumber("n", updateStats->inserted ? 1 : updateStats->nMatched);
+ // Note we have to use the objInserted from the stats here, rather than 'value'
+ // because the _id field could have been excluded by a projection.
+ if (!updateStats->objInserted.isEmpty()) {
+ lastErrorObjBuilder.appendAs(updateStats->objInserted["_id"], kUpsertedFieldName);
}
- invariant(state == PlanExecutor::IS_EOF);
- return boost::optional<BSONObj>(boost::none);
}
+ lastErrorObjBuilder.done();
- void makeUpdateRequest(const FindAndModifyRequest& args,
- bool explain,
- UpdateLifecycleImpl* updateLifecycle,
- UpdateRequest* requestOut) {
- requestOut->setQuery(args.getQuery());
- requestOut->setProj(args.getFields());
- requestOut->setUpdates(args.getUpdateObj());
- requestOut->setSort(args.getSort());
- requestOut->setUpsert(args.isUpsert());
- requestOut->setReturnDocs(args.shouldReturnNew()
- ? UpdateRequest::RETURN_NEW
- : UpdateRequest::RETURN_OLD);
- requestOut->setMulti(false);
- requestOut->setYieldPolicy(PlanExecutor::YIELD_AUTO);
- requestOut->setExplain(explain);
- requestOut->setLifecycle(updateLifecycle);
+ if (value) {
+ result.append("value", *value);
+ } else {
+ result.appendNull("value");
}
+}
+
+Status checkCanAcceptWritesForDatabase(const NamespaceString& nsString) {
+ if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(nsString)) {
+ return Status(ErrorCodes::NotMaster,
+ str::stream()
+ << "Not primary while running findAndModify command on collection "
+ << nsString.ns());
+ }
+ return Status::OK();
+}
+
+} // namespace
- void makeDeleteRequest(const FindAndModifyRequest& args,
- bool explain,
- DeleteRequest* requestOut) {
- requestOut->setQuery(args.getQuery());
- requestOut->setProj(args.getFields());
- requestOut->setSort(args.getSort());
- requestOut->setMulti(false);
- requestOut->setYieldPolicy(PlanExecutor::YIELD_AUTO);
- requestOut->setReturnDeleted(true); // Always return the old value.
- requestOut->setExplain(explain);
+/* Find and Modify an object returning either the old (default) or new value*/
+class CmdFindAndModify : public Command {
+public:
+ void help(std::stringstream& help) const override {
+ help << "{ findAndModify: \"collection\", query: {processed:false}, update: {$set: "
+ "{processed:true}}, new: true}\n"
+ "{ findAndModify: \"collection\", query: {processed:false}, remove: true, sort: "
+ "{priority:-1}}\n"
+ "Either update or remove is required, all other fields have default values.\n"
+ "Output is in the \"value\" field\n";
}
- void appendCommandResponse(PlanExecutor* exec,
- bool isRemove,
- const boost::optional<BSONObj>& value,
- BSONObjBuilder& result) {
- const std::unique_ptr<PlanStageStats> stats(exec->getStats());
- BSONObjBuilder lastErrorObjBuilder(result.subobjStart("lastErrorObject"));
+ CmdFindAndModify() : Command("findAndModify", false, "findandmodify") {}
+ bool slaveOk() const override {
+ return false;
+ }
+ bool isWriteCommandForConfigServer() const override {
+ return true;
+ }
+ void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) override {
+ find_and_modify::addPrivilegesRequiredForFindAndModify(this, dbname, cmdObj, out);
+ }
- if (isRemove) {
- lastErrorObjBuilder.appendNumber("n", getDeleteStats(stats.get())->docsDeleted);
+ Status explain(OperationContext* txn,
+ const std::string& dbName,
+ const BSONObj& cmdObj,
+ ExplainCommon::Verbosity verbosity,
+ BSONObjBuilder* out) const override {
+ const std::string fullNs = parseNsCollectionRequired(dbName, cmdObj);
+ Status allowedWriteStatus = userAllowedWriteNS(fullNs);
+ if (!allowedWriteStatus.isOK()) {
+ return allowedWriteStatus;
}
- else {
- const UpdateStats* updateStats = getUpdateStats(stats.get());
- lastErrorObjBuilder.appendBool("updatedExisting", updateStats->nMatched > 0);
- lastErrorObjBuilder.appendNumber("n", updateStats->inserted ? 1
- : updateStats->nMatched);
- // Note we have to use the objInserted from the stats here, rather than 'value'
- // because the _id field could have been excluded by a projection.
- if (!updateStats->objInserted.isEmpty()) {
- lastErrorObjBuilder.appendAs(updateStats->objInserted["_id"], kUpsertedFieldName);
- }
- }
- lastErrorObjBuilder.done();
- if (value) {
- result.append("value", *value);
- }
- else {
- result.appendNull("value");
+ StatusWith<FindAndModifyRequest> parseStatus =
+ FindAndModifyRequest::parseFromBSON(NamespaceString(fullNs), cmdObj);
+ if (!parseStatus.isOK()) {
+ return parseStatus.getStatus();
}
- }
- Status checkCanAcceptWritesForDatabase(const NamespaceString& nsString) {
- if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(nsString)) {
- return Status(ErrorCodes::NotMaster, str::stream()
- << "Not primary while running findAndModify command on collection "
- << nsString.ns());
- }
- return Status::OK();
- }
+ const FindAndModifyRequest& args = parseStatus.getValue();
+ const NamespaceString& nsString = args.getNamespaceString();
-} // namespace
+ auto client = txn->getClient();
- /* Find and Modify an object returning either the old (default) or new value*/
- class CmdFindAndModify : public Command {
- public:
- void help(std::stringstream& help) const override {
- help <<
- "{ findAndModify: \"collection\", query: {processed:false}, update: {$set: {processed:true}}, new: true}\n"
- "{ findAndModify: \"collection\", query: {processed:false}, remove: true, sort: {priority:-1}}\n"
- "Either update or remove is required, all other fields have default values.\n"
- "Output is in the \"value\" field\n";
- }
+ if (args.isRemove()) {
+ DeleteRequest request(nsString);
+ const bool isExplain = true;
+ makeDeleteRequest(args, isExplain, &request);
- CmdFindAndModify() : Command("findAndModify", false, "findandmodify") { }
- bool slaveOk() const override { return false; }
- bool isWriteCommandForConfigServer() const override { return true; }
- void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) override {
- find_and_modify::addPrivilegesRequiredForFindAndModify(this, dbname, cmdObj, out);
- }
+ ParsedDelete parsedDelete(txn, &request);
+ Status parsedDeleteStatus = parsedDelete.parseRequest();
+ if (!parsedDeleteStatus.isOK()) {
+ return parsedDeleteStatus;
+ }
+
+ // Explain calls of the findAndModify command are read-only, but we take write
+ // locks so that the timing information is more accurate.
+ AutoGetDb autoDb(txn, dbName, MODE_IX);
+ Lock::CollectionLock collLock(txn->lockState(), nsString.ns(), MODE_IX);
+
+ ensureShardVersionOKOrThrow(client, nsString.ns());
- Status explain(OperationContext* txn,
- const std::string& dbName,
- const BSONObj& cmdObj,
- ExplainCommon::Verbosity verbosity,
- BSONObjBuilder* out) const override {
- const std::string fullNs = parseNsCollectionRequired(dbName, cmdObj);
- Status allowedWriteStatus = userAllowedWriteNS(fullNs);
- if (!allowedWriteStatus.isOK()) {
- return allowedWriteStatus;
+ Collection* collection = nullptr;
+ if (autoDb.getDb()) {
+ collection = autoDb.getDb()->getCollection(nsString.ns());
+ } else {
+ return {ErrorCodes::DatabaseNotFound,
+ str::stream() << "database " << dbName << " does not exist."};
}
- StatusWith<FindAndModifyRequest> parseStatus =
- FindAndModifyRequest::parseFromBSON(NamespaceString(fullNs), cmdObj);
- if (!parseStatus.isOK()) {
- return parseStatus.getStatus();
+ PlanExecutor* rawExec;
+ Status execStatus = getExecutorDelete(txn, collection, &parsedDelete, &rawExec);
+ if (!execStatus.isOK()) {
+ return execStatus;
}
+ const std::unique_ptr<PlanExecutor> exec(rawExec);
+ Explain::explainStages(exec.get(), verbosity, out);
+ } else {
+ UpdateRequest request(nsString);
+ const bool ignoreVersion = false;
+ UpdateLifecycleImpl updateLifecycle(ignoreVersion, nsString);
+ const bool isExplain = true;
+ makeUpdateRequest(args, isExplain, &updateLifecycle, &request);
+
+ ParsedUpdate parsedUpdate(txn, &request);
+ Status parsedUpdateStatus = parsedUpdate.parseRequest();
+ if (!parsedUpdateStatus.isOK()) {
+ return parsedUpdateStatus;
+ }
+
+ OpDebug* opDebug = &CurOp::get(txn)->debug();
- const FindAndModifyRequest& args = parseStatus.getValue();
- const NamespaceString& nsString = args.getNamespaceString();
+ // Explain calls of the findAndModify command are read-only, but we take write
+ // locks so that the timing information is more accurate.
+ AutoGetDb autoDb(txn, dbName, MODE_IX);
+ Lock::CollectionLock collLock(txn->lockState(), nsString.ns(), MODE_IX);
- auto client = txn->getClient();
+ ensureShardVersionOKOrThrow(client, nsString.ns());
+ Collection* collection = nullptr;
+ if (autoDb.getDb()) {
+ collection = autoDb.getDb()->getCollection(nsString.ns());
+ } else {
+ return {ErrorCodes::DatabaseNotFound,
+ str::stream() << "database " << dbName << " does not exist."};
+ }
+
+ PlanExecutor* rawExec;
+ Status execStatus =
+ getExecutorUpdate(txn, collection, &parsedUpdate, opDebug, &rawExec);
+ if (!execStatus.isOK()) {
+ return execStatus;
+ }
+ const std::unique_ptr<PlanExecutor> exec(rawExec);
+ Explain::explainStages(exec.get(), verbosity, out);
+ }
+
+ return Status::OK();
+ }
+
+ bool run(OperationContext* txn,
+ const std::string& dbName,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result) override {
+ // findAndModify command is not replicated directly.
+ invariant(txn->writesAreReplicated());
+ const std::string fullNs = parseNsCollectionRequired(dbName, cmdObj);
+ Status allowedWriteStatus = userAllowedWriteNS(fullNs);
+ if (!allowedWriteStatus.isOK()) {
+ return appendCommandStatus(result, allowedWriteStatus);
+ }
+
+ StatusWith<FindAndModifyRequest> parseStatus =
+ FindAndModifyRequest::parseFromBSON(NamespaceString(fullNs), cmdObj);
+ if (!parseStatus.isOK()) {
+ return appendCommandStatus(result, parseStatus.getStatus());
+ }
+
+ const FindAndModifyRequest& args = parseStatus.getValue();
+ const NamespaceString& nsString = args.getNamespaceString();
+
+ StatusWith<WriteConcernOptions> wcResult = extractWriteConcern(cmdObj);
+ if (!wcResult.isOK()) {
+ return appendCommandStatus(result, wcResult.getStatus());
+ }
+ txn->setWriteConcern(wcResult.getValue());
+ setupSynchronousCommit(txn);
+
+ boost::optional<DisableDocumentValidation> maybeDisableValidation;
+ if (shouldBypassDocumentValidationForCommand(cmdObj))
+ maybeDisableValidation.emplace(txn);
+
+ auto client = txn->getClient();
+
+ // We may encounter a WriteConflictException when creating a collection during an
+ // upsert, even when holding the exclusive lock on the database (due to other load on
+ // the system). The query framework should handle all other WriteConflictExceptions,
+ // but we defensively wrap the operation in the retry loop anyway.
+ //
+ // SERVER-17579 getExecutorUpdate() and getExecutorDelete() can throw a
+ // WriteConflictException when checking whether an index is ready or not.
+ // (on debug builds only)
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
if (args.isRemove()) {
DeleteRequest request(nsString);
- const bool isExplain = true;
+ const bool isExplain = false;
makeDeleteRequest(args, isExplain, &request);
ParsedDelete parsedDelete(txn, &request);
Status parsedDeleteStatus = parsedDelete.parseRequest();
if (!parsedDeleteStatus.isOK()) {
- return parsedDeleteStatus;
+ return appendCommandStatus(result, parsedDeleteStatus);
}
- // Explain calls of the findAndModify command are read-only, but we take write
- // locks so that the timing information is more accurate.
- AutoGetDb autoDb(txn, dbName, MODE_IX);
+ AutoGetOrCreateDb autoDb(txn, dbName, MODE_IX);
Lock::CollectionLock collLock(txn->lockState(), nsString.ns(), MODE_IX);
+ Collection* collection = autoDb.getDb()->getCollection(nsString.ns());
ensureShardVersionOKOrThrow(client, nsString.ns());
- Collection* collection = nullptr;
- if (autoDb.getDb()) {
- collection = autoDb.getDb()->getCollection(nsString.ns());
- }
- else {
- return {ErrorCodes::DatabaseNotFound,
- str::stream() << "database " << dbName << " does not exist."};
+ Status isPrimary = checkCanAcceptWritesForDatabase(nsString);
+ if (!isPrimary.isOK()) {
+ return appendCommandStatus(result, isPrimary);
}
PlanExecutor* rawExec;
Status execStatus = getExecutorDelete(txn, collection, &parsedDelete, &rawExec);
if (!execStatus.isOK()) {
- return execStatus;
+ return appendCommandStatus(result, execStatus);
}
const std::unique_ptr<PlanExecutor> exec(rawExec);
- Explain::explainStages(exec.get(), verbosity, out);
- }
- else {
+
+ StatusWith<boost::optional<BSONObj>> advanceStatus =
+ advanceExecutor(exec.get(), args.isRemove());
+ if (!advanceStatus.isOK()) {
+ return appendCommandStatus(result, advanceStatus.getStatus());
+ }
+
+ boost::optional<BSONObj> value = advanceStatus.getValue();
+ appendCommandResponse(exec.get(), args.isRemove(), value, result);
+ } else {
UpdateRequest request(nsString);
const bool ignoreVersion = false;
UpdateLifecycleImpl updateLifecycle(ignoreVersion, nsString);
- const bool isExplain = true;
+ const bool isExplain = false;
makeUpdateRequest(args, isExplain, &updateLifecycle, &request);
ParsedUpdate parsedUpdate(txn, &request);
Status parsedUpdateStatus = parsedUpdate.parseRequest();
if (!parsedUpdateStatus.isOK()) {
- return parsedUpdateStatus;
+ return appendCommandStatus(result, parsedUpdateStatus);
}
OpDebug* opDebug = &CurOp::get(txn)->debug();
- // Explain calls of the findAndModify command are read-only, but we take write
- // locks so that the timing information is more accurate.
- AutoGetDb autoDb(txn, dbName, MODE_IX);
+ AutoGetOrCreateDb autoDb(txn, dbName, MODE_IX);
Lock::CollectionLock collLock(txn->lockState(), nsString.ns(), MODE_IX);
+ Collection* collection = autoDb.getDb()->getCollection(nsString.ns());
ensureShardVersionOKOrThrow(client, nsString.ns());
- Collection* collection = nullptr;
- if (autoDb.getDb()) {
- collection = autoDb.getDb()->getCollection(nsString.ns());
- }
- else {
- return {ErrorCodes::DatabaseNotFound,
- str::stream() << "database " << dbName << " does not exist."};
- }
-
- PlanExecutor* rawExec;
- Status execStatus = getExecutorUpdate(txn, collection, &parsedUpdate, opDebug,
- &rawExec);
- if (!execStatus.isOK()) {
- return execStatus;
+ Status isPrimary = checkCanAcceptWritesForDatabase(nsString);
+ if (!isPrimary.isOK()) {
+ return appendCommandStatus(result, isPrimary);
}
- const std::unique_ptr<PlanExecutor> exec(rawExec);
- Explain::explainStages(exec.get(), verbosity, out);
- }
-
- return Status::OK();
- }
-
- bool run(OperationContext* txn,
- const std::string& dbName,
- BSONObj& cmdObj,
- int options,
- std::string& errmsg,
- BSONObjBuilder& result) override {
- // findAndModify command is not replicated directly.
- invariant(txn->writesAreReplicated());
- const std::string fullNs = parseNsCollectionRequired(dbName, cmdObj);
- Status allowedWriteStatus = userAllowedWriteNS(fullNs);
- if (!allowedWriteStatus.isOK()) {
- return appendCommandStatus(result, allowedWriteStatus);
- }
- StatusWith<FindAndModifyRequest> parseStatus =
- FindAndModifyRequest::parseFromBSON(NamespaceString(fullNs), cmdObj);
- if (!parseStatus.isOK()) {
- return appendCommandStatus(result, parseStatus.getStatus());
- }
-
- const FindAndModifyRequest& args = parseStatus.getValue();
- const NamespaceString& nsString = args.getNamespaceString();
-
- StatusWith<WriteConcernOptions> wcResult = extractWriteConcern(cmdObj);
- if (!wcResult.isOK()) {
- return appendCommandStatus(result, wcResult.getStatus());
- }
- txn->setWriteConcern(wcResult.getValue());
- setupSynchronousCommit(txn);
-
- boost::optional<DisableDocumentValidation> maybeDisableValidation;
- if (shouldBypassDocumentValidationForCommand(cmdObj))
- maybeDisableValidation.emplace(txn);
-
- auto client = txn->getClient();
-
- // We may encounter a WriteConflictException when creating a collection during an
- // upsert, even when holding the exclusive lock on the database (due to other load on
- // the system). The query framework should handle all other WriteConflictExceptions,
- // but we defensively wrap the operation in the retry loop anyway.
- //
- // SERVER-17579 getExecutorUpdate() and getExecutorDelete() can throw a
- // WriteConflictException when checking whether an index is ready or not.
- // (on debug builds only)
- MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- if (args.isRemove()) {
- DeleteRequest request(nsString);
- const bool isExplain = false;
- makeDeleteRequest(args, isExplain, &request);
-
- ParsedDelete parsedDelete(txn, &request);
- Status parsedDeleteStatus = parsedDelete.parseRequest();
- if (!parsedDeleteStatus.isOK()) {
- return appendCommandStatus(result, parsedDeleteStatus);
- }
-
- AutoGetOrCreateDb autoDb(txn, dbName, MODE_IX);
- Lock::CollectionLock collLock(txn->lockState(), nsString.ns(), MODE_IX);
- Collection* collection = autoDb.getDb()->getCollection(nsString.ns());
-
- ensureShardVersionOKOrThrow(client, nsString.ns());
-
- Status isPrimary = checkCanAcceptWritesForDatabase(nsString);
- if (!isPrimary.isOK()) {
- return appendCommandStatus(result, isPrimary);
- }
-
- PlanExecutor* rawExec;
- Status execStatus = getExecutorDelete(txn, collection, &parsedDelete, &rawExec);
- if (!execStatus.isOK()) {
- return appendCommandStatus(result, execStatus);
- }
- const std::unique_ptr<PlanExecutor> exec(rawExec);
-
- StatusWith<boost::optional<BSONObj>> advanceStatus =
- advanceExecutor(exec.get(), args.isRemove());
- if (!advanceStatus.isOK()) {
- return appendCommandStatus(result, advanceStatus.getStatus());
- }
-
- boost::optional<BSONObj> value = advanceStatus.getValue();
- appendCommandResponse(exec.get(), args.isRemove(), value, result);
- }
- else {
- UpdateRequest request(nsString);
- const bool ignoreVersion = false;
- UpdateLifecycleImpl updateLifecycle(ignoreVersion, nsString);
- const bool isExplain = false;
- makeUpdateRequest(args, isExplain, &updateLifecycle, &request);
-
- ParsedUpdate parsedUpdate(txn, &request);
- Status parsedUpdateStatus = parsedUpdate.parseRequest();
- if (!parsedUpdateStatus.isOK()) {
- return appendCommandStatus(result, parsedUpdateStatus);
- }
-
- OpDebug* opDebug = &CurOp::get(txn)->debug();
-
- AutoGetOrCreateDb autoDb(txn, dbName, MODE_IX);
- Lock::CollectionLock collLock(txn->lockState(), nsString.ns(), MODE_IX);
- Collection* collection = autoDb.getDb()->getCollection(nsString.ns());
-
- ensureShardVersionOKOrThrow(client, nsString.ns());
-
- Status isPrimary = checkCanAcceptWritesForDatabase(nsString);
- if (!isPrimary.isOK()) {
- return appendCommandStatus(result, isPrimary);
+ // Create the collection if it does not exist when performing an upsert
+ // because the update stage does not create its own collection.
+ if (!collection && args.isUpsert()) {
+ // Release the collection lock and reacquire a lock on the database
+ // in exclusive mode in order to create the collection.
+ collLock.relockAsDatabaseExclusive(autoDb.lock());
+ collection = autoDb.getDb()->getCollection(nsString.ns());
+ Status isPrimaryAfterRelock = checkCanAcceptWritesForDatabase(nsString);
+ if (!isPrimaryAfterRelock.isOK()) {
+ return appendCommandStatus(result, isPrimaryAfterRelock);
}
- // Create the collection if it does not exist when performing an upsert
- // because the update stage does not create its own collection.
- if (!collection && args.isUpsert()) {
- // Release the collection lock and reacquire a lock on the database
- // in exclusive mode in order to create the collection.
- collLock.relockAsDatabaseExclusive(autoDb.lock());
- collection = autoDb.getDb()->getCollection(nsString.ns());
- Status isPrimaryAfterRelock = checkCanAcceptWritesForDatabase(nsString);
- if (!isPrimaryAfterRelock.isOK()) {
- return appendCommandStatus(result, isPrimaryAfterRelock);
- }
-
- if (collection) {
-                        // Someone else beat us to creating the collection; do nothing.
- }
- else {
- WriteUnitOfWork wuow(txn);
- Status createCollStatus = userCreateNS(txn, autoDb.getDb(),
- nsString.ns(), BSONObj());
- if (!createCollStatus.isOK()) {
- return appendCommandStatus(result, createCollStatus);
- }
- wuow.commit();
-
- collection = autoDb.getDb()->getCollection(nsString.ns());
- invariant(collection);
+ if (collection) {
+                    // Someone else beat us to creating the collection; do nothing.
+ } else {
+ WriteUnitOfWork wuow(txn);
+ Status createCollStatus =
+ userCreateNS(txn, autoDb.getDb(), nsString.ns(), BSONObj());
+ if (!createCollStatus.isOK()) {
+ return appendCommandStatus(result, createCollStatus);
}
- }
+ wuow.commit();
- PlanExecutor* rawExec;
- Status execStatus = getExecutorUpdate(txn, collection, &parsedUpdate, opDebug,
- &rawExec);
- if (!execStatus.isOK()) {
- return appendCommandStatus(result, execStatus);
- }
- const std::unique_ptr<PlanExecutor> exec(rawExec);
-
- StatusWith<boost::optional<BSONObj>> advanceStatus =
- advanceExecutor(exec.get(), args.isRemove());
- if (!advanceStatus.isOK()) {
- return appendCommandStatus(result, advanceStatus.getStatus());
+ collection = autoDb.getDb()->getCollection(nsString.ns());
+ invariant(collection);
}
+ }
- boost::optional<BSONObj> value = advanceStatus.getValue();
- appendCommandResponse(exec.get(), args.isRemove(), value, result);
+ PlanExecutor* rawExec;
+ Status execStatus =
+ getExecutorUpdate(txn, collection, &parsedUpdate, opDebug, &rawExec);
+ if (!execStatus.isOK()) {
+ return appendCommandStatus(result, execStatus);
}
- } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "findAndModify", nsString.ns());
+ const std::unique_ptr<PlanExecutor> exec(rawExec);
- WriteConcernResult res;
- auto waitForWCStatus = waitForWriteConcern(
- txn,
- repl::ReplClientInfo::forClient(txn->getClient()).getLastOp(),
- &res
- );
- appendCommandWCStatus(result, waitForWCStatus);
+ StatusWith<boost::optional<BSONObj>> advanceStatus =
+ advanceExecutor(exec.get(), args.isRemove());
+ if (!advanceStatus.isOK()) {
+ return appendCommandStatus(result, advanceStatus.getStatus());
+ }
- return true;
+ boost::optional<BSONObj> value = advanceStatus.getValue();
+ appendCommandResponse(exec.get(), args.isRemove(), value, result);
+ }
}
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "findAndModify", nsString.ns());
+
+ WriteConcernResult res;
+ auto waitForWCStatus = waitForWriteConcern(
+ txn, repl::ReplClientInfo::forClient(txn->getClient()).getLastOp(), &res);
+ appendCommandWCStatus(result, waitForWCStatus);
+
+ return true;
+ }
- } cmdFindAndModify;
+} cmdFindAndModify;
} // namespace mongo
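
Aside: the entire body of CmdFindAndModify::run() above executes inside MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN/END, so a WriteConflictException raised anywhere in it (for example, by a racing collection creation during an upsert) restarts the whole operation. A minimal sketch of that retry shape, with WriteConflict and the attempt cap as illustrative stand-ins rather than the real macro's implementation:

#include <iostream>
#include <stdexcept>

struct WriteConflict {};

template <typename Body>
bool writeConflictRetry(Body body, int maxAttempts = 1000) {
    for (int attempt = 0; attempt < maxAttempts; ++attempt) {
        try {
            return body();  // run the whole operation from the top
        } catch (const WriteConflict&) {
            // A conflict abandons this attempt; loop around and retry.
        }
    }
    throw std::runtime_error("exceeded write conflict retries");
}

int main() {
    int failuresLeft = 2;
    bool ok = writeConflictRetry([&] {
        if (failuresLeft-- > 0)
            throw WriteConflict();  // e.g. a racing create during an upsert
        return true;
    });
    std::cout << (ok ? "committed" : "failed") << '\n';  // committed
}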
diff --git a/src/mongo/db/commands/find_and_modify.h b/src/mongo/db/commands/find_and_modify.h
index ab1a6eff18c..cd6c08e7c25 100644
--- a/src/mongo/db/commands/find_and_modify.h
+++ b/src/mongo/db/commands/find_and_modify.h
@@ -36,16 +36,14 @@
namespace mongo {
- class Command;
+class Command;
namespace find_and_modify {
- void addPrivilegesRequiredForFindAndModify(Command* commandTemplate,
- const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out);
-
-} // namespace find_and_modify
-} // namespace mongo
-
+void addPrivilegesRequiredForFindAndModify(Command* commandTemplate,
+ const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out);
+} // namespace find_and_modify
+} // namespace mongo
diff --git a/src/mongo/db/commands/find_and_modify_common.cpp b/src/mongo/db/commands/find_and_modify_common.cpp
index abe8bdab8f2..8179ea72406 100644
--- a/src/mongo/db/commands/find_and_modify_common.cpp
+++ b/src/mongo/db/commands/find_and_modify_common.cpp
@@ -44,34 +44,35 @@
namespace mongo {
namespace find_and_modify {
- void addPrivilegesRequiredForFindAndModify(Command* commandTemplate,
- const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- bool update = cmdObj["update"].trueValue();
- bool upsert = cmdObj["upsert"].trueValue();
- bool remove = cmdObj["remove"].trueValue();
- bool bypassDocumentValidation = shouldBypassDocumentValidationForCommand(cmdObj);
+void addPrivilegesRequiredForFindAndModify(Command* commandTemplate,
+ const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ bool update = cmdObj["update"].trueValue();
+ bool upsert = cmdObj["upsert"].trueValue();
+ bool remove = cmdObj["remove"].trueValue();
+ bool bypassDocumentValidation = shouldBypassDocumentValidationForCommand(cmdObj);
- ActionSet actions;
- actions.addAction(ActionType::find);
- if (update) {
- actions.addAction(ActionType::update);
- }
- if (upsert) {
- actions.addAction(ActionType::insert);
- }
- if (remove) {
- actions.addAction(ActionType::remove);
- }
- if (bypassDocumentValidation) {
- actions.addAction(ActionType::bypassDocumentValidation);
- }
- ResourcePattern resource(commandTemplate->parseResourcePattern(dbname, cmdObj));
- uassert(17137, "Invalid target namespace " + resource.toString(),
- resource.isExactNamespacePattern());
- out->push_back(Privilege(resource, actions));
- }
+ ActionSet actions;
+ actions.addAction(ActionType::find);
+ if (update) {
+ actions.addAction(ActionType::update);
+ }
+ if (upsert) {
+ actions.addAction(ActionType::insert);
+ }
+ if (remove) {
+ actions.addAction(ActionType::remove);
+ }
+ if (bypassDocumentValidation) {
+ actions.addAction(ActionType::bypassDocumentValidation);
+ }
+ ResourcePattern resource(commandTemplate->parseResourcePattern(dbname, cmdObj));
+ uassert(17137,
+ "Invalid target namespace " + resource.toString(),
+ resource.isExactNamespacePattern());
+ out->push_back(Privilege(resource, actions));
+}
-} // namespace find_and_modify
-} // namespace mongo
+} // namespace find_and_modify
+} // namespace mongo
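addPrivilegesRequiredForFindAndModify above derives the required ActionSet from the command's flags: every findAndModify needs find, and the mutating options each add an action. A minimal, self-contained sketch of that mapping (requiredActions and the string action names are illustrative stand-ins for ActionSet/ActionType):

    #include <iostream>
    #include <set>
    #include <string>

    // Compute the actions a findAndModify-shaped command would require.
    std::set<std::string> requiredActions(bool update, bool upsert, bool remove,
                                          bool bypassDocumentValidation) {
        std::set<std::string> actions{"find"};  // every findAndModify must read
        if (update) actions.insert("update");
        if (upsert) actions.insert("insert");   // an upsert may create a document
        if (remove) actions.insert("remove");
        if (bypassDocumentValidation) actions.insert("bypassDocumentValidation");
        return actions;
    }

    int main() {
        // e.g. {findAndModify: ..., update: ..., upsert: true}
        for (const std::string& a : requiredActions(true, true, false, false))
            std::cout << a << '\n';
    }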
diff --git a/src/mongo/db/commands/find_cmd.cpp b/src/mongo/db/commands/find_cmd.cpp
index 0bc9589bbef..d40ff0f766e 100644
--- a/src/mongo/db/commands/find_cmd.cpp
+++ b/src/mongo/db/commands/find_cmd.cpp
@@ -55,313 +55,312 @@
namespace mongo {
- /**
- * A command for running .find() queries.
- */
- class FindCmd : public Command {
- MONGO_DISALLOW_COPYING(FindCmd);
- public:
- FindCmd() : Command("find") { }
+/**
+ * A command for running .find() queries.
+ */
+class FindCmd : public Command {
+ MONGO_DISALLOW_COPYING(FindCmd);
- bool isWriteCommandForConfigServer() const override { return false; }
+public:
+ FindCmd() : Command("find") {}
- bool slaveOk() const override { return false; }
+ bool isWriteCommandForConfigServer() const override {
+ return false;
+ }
- bool slaveOverrideOk() const override { return true; }
+ bool slaveOk() const override {
+ return false;
+ }
- bool maintenanceOk() const override { return false; }
+ bool slaveOverrideOk() const override {
+ return true;
+ }
- bool adminOnly() const override { return false; }
+ bool maintenanceOk() const override {
+ return false;
+ }
- void help(std::stringstream& help) const override {
- help << "query for documents";
- }
+ bool adminOnly() const override {
+ return false;
+ }
- /**
- * A find command does not increment the command counter, but rather increments the
- * query counter.
- */
- bool shouldAffectCommandCounter() const override { return false; }
+ void help(std::stringstream& help) const override {
+ help << "query for documents";
+ }
- Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) override {
- AuthorizationSession* authzSession = AuthorizationSession::get(client);
- ResourcePattern pattern = parseResourcePattern(dbname, cmdObj);
+ /**
+ * A find command does not increment the command counter, but rather increments the
+ * query counter.
+ */
+ bool shouldAffectCommandCounter() const override {
+ return false;
+ }
- if (authzSession->isAuthorizedForActionsOnResource(pattern, ActionType::find)) {
- return Status::OK();
- }
+ Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) override {
+ AuthorizationSession* authzSession = AuthorizationSession::get(client);
+ ResourcePattern pattern = parseResourcePattern(dbname, cmdObj);
- return Status(ErrorCodes::Unauthorized, "unauthorized");
+ if (authzSession->isAuthorizedForActionsOnResource(pattern, ActionType::find)) {
+ return Status::OK();
}
- Status explain(OperationContext* txn,
- const std::string& dbname,
- const BSONObj& cmdObj,
- ExplainCommon::Verbosity verbosity,
- BSONObjBuilder* out) const override {
- const std::string fullns = parseNs(dbname, cmdObj);
- const NamespaceString nss(fullns);
- if (!nss.isValid()) {
- return {ErrorCodes::InvalidNamespace,
- str::stream() << "Invalid collection name: " << nss.ns()};
- }
+ return Status(ErrorCodes::Unauthorized, "unauthorized");
+ }
+
+ Status explain(OperationContext* txn,
+ const std::string& dbname,
+ const BSONObj& cmdObj,
+ ExplainCommon::Verbosity verbosity,
+ BSONObjBuilder* out) const override {
+ const std::string fullns = parseNs(dbname, cmdObj);
+ const NamespaceString nss(fullns);
+ if (!nss.isValid()) {
+ return {ErrorCodes::InvalidNamespace,
+ str::stream() << "Invalid collection name: " << nss.ns()};
+ }
- // Parse the command BSON to a LiteParsedQuery.
- const bool isExplain = true;
- auto lpqStatus = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- if (!lpqStatus.isOK()) {
- return lpqStatus.getStatus();
- }
+ // Parse the command BSON to a LiteParsedQuery.
+ const bool isExplain = true;
+ auto lpqStatus = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ if (!lpqStatus.isOK()) {
+ return lpqStatus.getStatus();
+ }
- // Finish the parsing step by using the LiteParsedQuery to create a CanonicalQuery.
- std::unique_ptr<CanonicalQuery> cq;
- {
- CanonicalQuery* rawCq;
- WhereCallbackReal whereCallback(txn, nss.db());
- Status canonStatus = CanonicalQuery::canonicalize(lpqStatus.getValue().release(),
- &rawCq,
- whereCallback);
- if (!canonStatus.isOK()) {
- return canonStatus;
- }
- cq.reset(rawCq);
+ // Finish the parsing step by using the LiteParsedQuery to create a CanonicalQuery.
+ std::unique_ptr<CanonicalQuery> cq;
+ {
+ CanonicalQuery* rawCq;
+ WhereCallbackReal whereCallback(txn, nss.db());
+ Status canonStatus =
+ CanonicalQuery::canonicalize(lpqStatus.getValue().release(), &rawCq, whereCallback);
+ if (!canonStatus.isOK()) {
+ return canonStatus;
}
+ cq.reset(rawCq);
+ }
- AutoGetCollectionForRead ctx(txn, nss);
- // The collection may be NULL. If so, getExecutor() should handle it by returning
- // an execution tree with an EOFStage.
- Collection* collection = ctx.getCollection();
-
- // We have a parsed query. Time to get the execution plan for it.
- std::unique_ptr<PlanExecutor> exec;
- {
- PlanExecutor* rawExec;
- Status execStatus = getExecutorFind(txn,
- collection,
- nss,
- cq.release(),
- PlanExecutor::YIELD_AUTO,
- &rawExec);
- if (!execStatus.isOK()) {
- return execStatus;
- }
- exec.reset(rawExec);
+ AutoGetCollectionForRead ctx(txn, nss);
+ // The collection may be NULL. If so, getExecutor() should handle it by returning
+ // an execution tree with an EOFStage.
+ Collection* collection = ctx.getCollection();
+
+ // We have a parsed query. Time to get the execution plan for it.
+ std::unique_ptr<PlanExecutor> exec;
+ {
+ PlanExecutor* rawExec;
+ Status execStatus = getExecutorFind(
+ txn, collection, nss, cq.release(), PlanExecutor::YIELD_AUTO, &rawExec);
+ if (!execStatus.isOK()) {
+ return execStatus;
}
+ exec.reset(rawExec);
+ }
- // Got the execution tree. Explain it.
- Explain::explainStages(exec.get(), verbosity, out);
- return Status::OK();
+ // Got the execution tree. Explain it.
+ Explain::explainStages(exec.get(), verbosity, out);
+ return Status::OK();
+ }
+
+ /**
+ * Runs a query using the following steps:
+ * 1) Parsing.
+ * 2) Acquire locks.
+ * 3) Plan query, obtaining an executor that can run it.
+ * 4) Set up a cursor for the query, which may be used on subsequent getMores.
+ * 5) Generate the first batch.
+ * 6) Save state for getMore.
+ * 7) Generate response to send to the client.
+ *
+ * TODO: Rather than using the sharding version available in thread-local storage
+ * (i.e. call to shardingState.needCollectionMetadata() below), shard version
+ * information should be passed as part of the command parameter.
+ */
+ bool run(OperationContext* txn,
+ const std::string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result) override {
+ const std::string fullns = parseNs(dbname, cmdObj);
+ const NamespaceString nss(fullns);
+ if (!nss.isValid()) {
+ return appendCommandStatus(result,
+ {ErrorCodes::InvalidNamespace,
+ str::stream() << "Invalid collection name: " << nss.ns()});
}
- /**
- * Runs a query using the following steps:
- * 1) Parsing.
- * 2) Acquire locks.
- * 3) Plan query, obtaining an executor that can run it.
- * 4) Set up a cursor for the query, which may be used on subsequent getMores.
- * 5) Generate the first batch.
- * 6) Save state for getMore.
- * 7) Generate response to send to the client.
- *
- * TODO: Rather than using the sharding version available in thread-local storage
- * (i.e. call to shardingState.needCollectionMetadata() below), shard version
- * information should be passed as part of the command parameter.
- */
- bool run(OperationContext* txn,
- const std::string& dbname,
- BSONObj& cmdObj,
- int options,
- std::string& errmsg,
- BSONObjBuilder& result) override {
- const std::string fullns = parseNs(dbname, cmdObj);
- const NamespaceString nss(fullns);
- if (!nss.isValid()) {
- return appendCommandStatus(result, {ErrorCodes::InvalidNamespace,
- str::stream() << "Invalid collection name: "
- << nss.ns()});
- }
+ // Although it is a command, a find command gets counted as a query.
+ globalOpCounters.gotQuery();
- // Although it is a command, a find command gets counted as a query.
- globalOpCounters.gotQuery();
+ if (txn->getClient()->isInDirectClient()) {
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::IllegalOperation, "Cannot run find command from eval()"));
+ }
- if (txn->getClient()->isInDirectClient()) {
- return appendCommandStatus(result,
- Status(ErrorCodes::IllegalOperation,
- "Cannot run find command from eval()"));
- }
+ // 1a) Parse the command BSON to a LiteParsedQuery.
+ const bool isExplain = false;
+ auto lpqStatus = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ if (!lpqStatus.isOK()) {
+ return appendCommandStatus(result, lpqStatus.getStatus());
+ }
- // 1a) Parse the command BSON to a LiteParsedQuery.
- const bool isExplain = false;
- auto lpqStatus = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- if (!lpqStatus.isOK()) {
- return appendCommandStatus(result, lpqStatus.getStatus());
- }
+ auto& lpq = lpqStatus.getValue();
- auto& lpq = lpqStatus.getValue();
-
- // Fill out curop information.
- int ntoreturn = lpq->getBatchSize().value_or(0);
- beginQueryOp(txn, nss, cmdObj, ntoreturn, lpq->getSkip());
-
- // 1b) Finish the parsing step by using the LiteParsedQuery to create a CanonicalQuery.
- std::unique_ptr<CanonicalQuery> cq;
- {
- CanonicalQuery* rawCq;
- WhereCallbackReal whereCallback(txn, nss.db());
- Status canonStatus = CanonicalQuery::canonicalize(lpq.release(),
- &rawCq,
- whereCallback);
- if (!canonStatus.isOK()) {
- return appendCommandStatus(result, canonStatus);
- }
- cq.reset(rawCq);
- }
+ // Fill out curop information.
+ int ntoreturn = lpq->getBatchSize().value_or(0);
+ beginQueryOp(txn, nss, cmdObj, ntoreturn, lpq->getSkip());
- // 2) Acquire locks.
- AutoGetCollectionForRead ctx(txn, nss);
- Collection* collection = ctx.getCollection();
-
- const int dbProfilingLevel = ctx.getDb() ? ctx.getDb()->getProfilingLevel() :
- serverGlobalParams.defaultProfile;
-
- // It is possible that the sharding version will change during yield while we are
- // retrieving a plan executor. If this happens we will throw an error and mongos will
- // retry.
- const ChunkVersion shardingVersionAtStart = shardingState.getVersion(nss.ns());
-
- // 3) Get the execution plan for the query.
- std::unique_ptr<PlanExecutor> execHolder;
- {
- PlanExecutor* rawExec;
- Status execStatus = getExecutorFind(txn,
- collection,
- nss,
- cq.release(),
- PlanExecutor::YIELD_AUTO,
- &rawExec);
- if (!execStatus.isOK()) {
- return appendCommandStatus(result, execStatus);
- }
- execHolder.reset(rawExec);
+ // 1b) Finish the parsing step by using the LiteParsedQuery to create a CanonicalQuery.
+ std::unique_ptr<CanonicalQuery> cq;
+ {
+ CanonicalQuery* rawCq;
+ WhereCallbackReal whereCallback(txn, nss.db());
+ Status canonStatus = CanonicalQuery::canonicalize(lpq.release(), &rawCq, whereCallback);
+ if (!canonStatus.isOK()) {
+ return appendCommandStatus(result, canonStatus);
}
+ cq.reset(rawCq);
+ }
- // TODO: Currently, chunk ranges are kept around until all ClientCursors created while
- // the chunk belonged on this node are gone. Separating chunk lifetime management from
- // ClientCursor should allow this check to go away.
- if (!shardingState.getVersion(nss.ns()).isWriteCompatibleWith(shardingVersionAtStart)) {
- // Version changed while retrieving a PlanExecutor. Terminate the operation,
- // signaling that mongos should retry.
- throw SendStaleConfigException(nss.ns(),
- "version changed during find command",
- shardingVersionAtStart,
- shardingState.getVersion(nss.ns()));
+ // 2) Acquire locks.
+ AutoGetCollectionForRead ctx(txn, nss);
+ Collection* collection = ctx.getCollection();
+
+ const int dbProfilingLevel =
+ ctx.getDb() ? ctx.getDb()->getProfilingLevel() : serverGlobalParams.defaultProfile;
+
+ // It is possible that the sharding version will change during yield while we are
+ // retrieving a plan executor. If this happens we will throw an error and mongos will
+ // retry.
+ const ChunkVersion shardingVersionAtStart = shardingState.getVersion(nss.ns());
+
+ // 3) Get the execution plan for the query.
+ std::unique_ptr<PlanExecutor> execHolder;
+ {
+ PlanExecutor* rawExec;
+ Status execStatus = getExecutorFind(
+ txn, collection, nss, cq.release(), PlanExecutor::YIELD_AUTO, &rawExec);
+ if (!execStatus.isOK()) {
+ return appendCommandStatus(result, execStatus);
}
+ execHolder.reset(rawExec);
+ }
- if (!collection) {
- // No collection. Just fill out curop indicating that there were zero results and
- // there is no ClientCursor id, and then return.
- const int numResults = 0;
- const CursorId cursorId = 0;
- endQueryOp(txn, execHolder.get(), dbProfilingLevel, numResults, cursorId);
- appendCursorResponseObject(cursorId, nss.ns(), BSONArray(), &result);
- return true;
- }
+ // TODO: Currently, chunk ranges are kept around until all ClientCursors created while
+ // the chunk belonged on this node are gone. Separating chunk lifetime management from
+ // ClientCursor should allow this check to go away.
+ if (!shardingState.getVersion(nss.ns()).isWriteCompatibleWith(shardingVersionAtStart)) {
+ // Version changed while retrieving a PlanExecutor. Terminate the operation,
+ // signaling that mongos should retry.
+ throw SendStaleConfigException(nss.ns(),
+ "version changed during find command",
+ shardingVersionAtStart,
+ shardingState.getVersion(nss.ns()));
+ }
- const LiteParsedQuery& pq = execHolder->getCanonicalQuery()->getParsed();
-
- // 4) If possible, register the execution plan inside a ClientCursor, and pin that
- // cursor. In this case, ownership of the PlanExecutor is transferred to the
- // ClientCursor, and 'exec' becomes null.
- //
- // First unregister the PlanExecutor so it can be re-registered with ClientCursor.
- execHolder->deregisterExec();
-
- // Create a ClientCursor containing this plan executor. We don't have to worry
- // about leaking it as it's inserted into a global map by its ctor.
- ClientCursor* cursor = new ClientCursor(collection->getCursorManager(),
- execHolder.release(),
- nss.ns(),
- pq.getOptions(),
- pq.getFilter());
- CursorId cursorId = cursor->cursorid();
- ClientCursorPin ccPin(collection->getCursorManager(), cursorId);
-
- // On early return, get rid of the cursor.
- ScopeGuard cursorFreer = MakeGuard(&ClientCursorPin::deleteUnderlying, ccPin);
-
- invariant(!execHolder);
- PlanExecutor* exec = cursor->getExecutor();
-
- // 5) Stream query results, adding them to a BSONArray as we go.
- BSONArrayBuilder firstBatch;
- BSONObj obj;
- PlanExecutor::ExecState state;
- int numResults = 0;
- while (!enoughForFirstBatch(pq, numResults, firstBatch.len())
- && PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
- // If adding this object will cause us to exceed the BSON size limit, then we stash
- // it for later.
- if (firstBatch.len() + obj.objsize() > BSONObjMaxUserSize && numResults > 0) {
- exec->enqueue(obj);
- break;
- }
-
- // Add result to output buffer.
- firstBatch.append(obj);
- numResults++;
- }
+ if (!collection) {
+ // No collection. Just fill out curop indicating that there were zero results and
+ // there is no ClientCursor id, and then return.
+ const int numResults = 0;
+ const CursorId cursorId = 0;
+ endQueryOp(txn, execHolder.get(), dbProfilingLevel, numResults, cursorId);
+ appendCursorResponseObject(cursorId, nss.ns(), BSONArray(), &result);
+ return true;
+ }
- // Throw an assertion if query execution fails for any reason.
- if (PlanExecutor::FAILURE == state || PlanExecutor::DEAD == state) {
- const std::unique_ptr<PlanStageStats> stats(exec->getStats());
- error() << "Plan executor error during find command: "
- << PlanExecutor::statestr(state)
- << ", stats: " << Explain::statsToBSON(*stats);
-
- return appendCommandStatus(result,
- Status(ErrorCodes::OperationFailed,
- str::stream()
- << "Executor error during find command: "
- << WorkingSetCommon::toStatusString(obj)));
+ const LiteParsedQuery& pq = execHolder->getCanonicalQuery()->getParsed();
+
+ // 4) If possible, register the execution plan inside a ClientCursor, and pin that
+ // cursor. In this case, ownership of the PlanExecutor is transferred to the
+ // ClientCursor, and 'exec' becomes null.
+ //
+ // First unregister the PlanExecutor so it can be re-registered with ClientCursor.
+ execHolder->deregisterExec();
+
+ // Create a ClientCursor containing this plan executor. We don't have to worry
+ // about leaking it as it's inserted into a global map by its ctor.
+ ClientCursor* cursor = new ClientCursor(collection->getCursorManager(),
+ execHolder.release(),
+ nss.ns(),
+ pq.getOptions(),
+ pq.getFilter());
+ CursorId cursorId = cursor->cursorid();
+ ClientCursorPin ccPin(collection->getCursorManager(), cursorId);
+
+ // On early return, get rid of the cursor.
+ ScopeGuard cursorFreer = MakeGuard(&ClientCursorPin::deleteUnderlying, ccPin);
+
+ invariant(!execHolder);
+ PlanExecutor* exec = cursor->getExecutor();
+
+ // 5) Stream query results, adding them to a BSONArray as we go.
+ BSONArrayBuilder firstBatch;
+ BSONObj obj;
+ PlanExecutor::ExecState state;
+ int numResults = 0;
+ while (!enoughForFirstBatch(pq, numResults, firstBatch.len()) &&
+ PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
+ // If adding this object will cause us to exceed the BSON size limit, then we stash
+ // it for later.
+ if (firstBatch.len() + obj.objsize() > BSONObjMaxUserSize && numResults > 0) {
+ exec->enqueue(obj);
+ break;
}
- // 6) Set up the cursor for getMore.
- if (shouldSaveCursor(txn, collection, state, exec)) {
- // State will be restored on getMore.
- exec->saveState();
-
- cursor->setLeftoverMaxTimeMicros(CurOp::get(txn)->getRemainingMaxTimeMicros());
- cursor->setPos(numResults);
-
- // Don't stash the RU for tailable cursors at EOF; let them get a new RU on their
- // next getMore.
- if (!(pq.isTailable() && state == PlanExecutor::IS_EOF)) {
- // We stash away the RecoveryUnit in the ClientCursor. It's used for
- // subsequent getMore requests. The calling OpCtx gets a fresh RecoveryUnit.
- txn->recoveryUnit()->abandonSnapshot();
- cursor->setOwnedRecoveryUnit(txn->releaseRecoveryUnit());
- StorageEngine* engine = getGlobalServiceContext()->getGlobalStorageEngine();
- txn->setRecoveryUnit(engine->newRecoveryUnit(),
- OperationContext::kNotInUnitOfWork);
- }
- }
- else {
- cursorId = 0;
- }
+ // Add result to output buffer.
+ firstBatch.append(obj);
+ numResults++;
+ }
- // Fill out curop based on the results.
- endQueryOp(txn, exec, dbProfilingLevel, numResults, cursorId);
+ // Throw an assertion if query execution fails for any reason.
+ if (PlanExecutor::FAILURE == state || PlanExecutor::DEAD == state) {
+ const std::unique_ptr<PlanStageStats> stats(exec->getStats());
+ error() << "Plan executor error during find command: " << PlanExecutor::statestr(state)
+ << ", stats: " << Explain::statsToBSON(*stats);
+
+ return appendCommandStatus(result,
+ Status(ErrorCodes::OperationFailed,
+ str::stream()
+ << "Executor error during find command: "
+ << WorkingSetCommon::toStatusString(obj)));
+ }
- // 7) Generate the response object to send to the client.
- appendCursorResponseObject(cursorId, nss.ns(), firstBatch.arr(), &result);
- if (cursorId) {
- cursorFreer.Dismiss();
+ // 6) Set up the cursor for getMore.
+ if (shouldSaveCursor(txn, collection, state, exec)) {
+ // State will be restored on getMore.
+ exec->saveState();
+
+ cursor->setLeftoverMaxTimeMicros(CurOp::get(txn)->getRemainingMaxTimeMicros());
+ cursor->setPos(numResults);
+
+ // Don't stash the RU for tailable cursors at EOF; let them get a new RU on their
+ // next getMore.
+ if (!(pq.isTailable() && state == PlanExecutor::IS_EOF)) {
+ // We stash away the RecoveryUnit in the ClientCursor. It's used for
+ // subsequent getMore requests. The calling OpCtx gets a fresh RecoveryUnit.
+ txn->recoveryUnit()->abandonSnapshot();
+ cursor->setOwnedRecoveryUnit(txn->releaseRecoveryUnit());
+ StorageEngine* engine = getGlobalServiceContext()->getGlobalStorageEngine();
+ txn->setRecoveryUnit(engine->newRecoveryUnit(), OperationContext::kNotInUnitOfWork);
}
- return true;
+ } else {
+ cursorId = 0;
+ }
+
+ // Fill out curop based on the results.
+ endQueryOp(txn, exec, dbProfilingLevel, numResults, cursorId);
+
+ // 7) Generate the response object to send to the client.
+ appendCursorResponseObject(cursorId, nss.ns(), firstBatch.arr(), &result);
+ if (cursorId) {
+ cursorFreer.Dismiss();
}
+ return true;
+ }
- } findCmd;
+} findCmd;
-} // namespace mongo
+} // namespace mongo
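Step 5 of the find command above caps the first batch both by the batch-size target and by the reply-size limit, stashing a document that would overflow the reply via exec->enqueue() so the next getMore returns it first. A minimal, self-contained sketch of that accumulation loop (kMaxReplyBytes stands in for BSONObjMaxUserSize; the container types are illustrative):

    #include <cstddef>
    #include <deque>
    #include <iostream>
    #include <string>
    #include <vector>

    int main() {
        const std::size_t kMaxReplyBytes = 20;  // stands in for BSONObjMaxUserSize
        std::deque<std::string> docs{"aaaa", "bbbb", "cccccccccccccccc", "dd"};
        std::vector<std::string> firstBatch;
        std::deque<std::string> stashed;  // stands in for exec->enqueue(obj)
        std::size_t bytes = 0;

        while (!docs.empty()) {
            const std::string obj = docs.front();
            // Never drop a fetched document: if it would overflow the reply
            // (and the batch is non-empty), stash it for the next getMore.
            if (bytes + obj.size() > kMaxReplyBytes && !firstBatch.empty()) {
                stashed.push_back(obj);
                break;
            }
            bytes += obj.size();
            firstBatch.push_back(obj);
            docs.pop_front();
        }
        std::cout << firstBatch.size() << " docs in first batch, "
                  << stashed.size() << " stashed for getMore\n";
    }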
diff --git a/src/mongo/db/commands/fsync.cpp b/src/mongo/db/commands/fsync.cpp
index aa3b8c9855a..348637d062a 100644
--- a/src/mongo/db/commands/fsync.cpp
+++ b/src/mongo/db/commands/fsync.cpp
@@ -57,232 +57,245 @@
namespace mongo {
- using std::endl;
- using std::string;
- using std::stringstream;
-
- class FSyncLockThread : public BackgroundJob {
- void doRealWork();
- public:
- FSyncLockThread() : BackgroundJob( true ) {}
- virtual ~FSyncLockThread(){}
- virtual string name() const { return "FSyncLockThread"; }
- virtual void run() {
- Client::initThread( "fsyncLockWorker" );
- try {
- doRealWork();
- }
- catch ( std::exception& e ) {
- error() << "FSyncLockThread exception: " << e.what() << endl;
- }
+using std::endl;
+using std::string;
+using std::stringstream;
+
+class FSyncLockThread : public BackgroundJob {
+ void doRealWork();
+
+public:
+ FSyncLockThread() : BackgroundJob(true) {}
+ virtual ~FSyncLockThread() {}
+ virtual string name() const {
+ return "FSyncLockThread";
+ }
+ virtual void run() {
+ Client::initThread("fsyncLockWorker");
+ try {
+ doRealWork();
+ } catch (std::exception& e) {
+ error() << "FSyncLockThread exception: " << e.what() << endl;
}
- };
-
- /* see unlockFsync() for unlocking:
- db.$cmd.sys.unlock.findOne()
- */
- class FSyncCommand : public Command {
- public:
- static const char* url() { return "http://dochub.mongodb.org/core/fsynccommand"; }
- bool locked;
- bool pendingUnlock;
- SimpleMutex m; // protects locked var above
- string err;
-
- stdx::condition_variable_any _threadSync;
- stdx::condition_variable_any _unlockSync;
-
- FSyncCommand() : Command( "fsync" ) { locked=false; pendingUnlock=false; }
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual bool slaveOk() const { return true; }
- virtual bool adminOnly() const { return true; }
- virtual void help(stringstream& h) const { h << url(); }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::fsync);
- out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
+ }
+};
+
+/* see unlockFsync() for unlocking:
+ db.$cmd.sys.unlock.findOne()
+*/
+class FSyncCommand : public Command {
+public:
+ static const char* url() {
+ return "http://dochub.mongodb.org/core/fsynccommand";
+ }
+ bool locked;
+ bool pendingUnlock;
+ SimpleMutex m; // protects locked var above
+ string err;
+
+ stdx::condition_variable_any _threadSync;
+ stdx::condition_variable_any _unlockSync;
+
+ FSyncCommand() : Command("fsync") {
+ locked = false;
+ pendingUnlock = false;
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual bool adminOnly() const {
+ return true;
+ }
+ virtual void help(stringstream& h) const {
+ h << url();
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::fsync);
+ out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
+ }
+ virtual bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ if (txn->lockState()->isLocked()) {
+ errmsg = "fsync: Cannot execute fsync command from contexts that hold a data lock";
+ return false;
}
- virtual bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
-
- if (txn->lockState()->isLocked()) {
- errmsg = "fsync: Cannot execute fsync command from contexts that hold a data lock";
+
+ bool sync =
+ !cmdObj["async"].trueValue(); // async means do an fsync, but return immediately
+ bool lock = cmdObj["lock"].trueValue();
+ log() << "CMD fsync: sync:" << sync << " lock:" << lock << endl;
+ if (lock) {
+ if (!sync) {
+ errmsg = "fsync: sync option must be true when using lock";
return false;
}
- bool sync = !cmdObj["async"].trueValue(); // async means do an fsync, but return immediately
- bool lock = cmdObj["lock"].trueValue();
- log() << "CMD fsync: sync:" << sync << " lock:" << lock << endl;
- if( lock ) {
- if ( ! sync ) {
- errmsg = "fsync: sync option must be true when using lock";
- return false;
- }
-
- stdx::lock_guard<SimpleMutex> lk(m);
- err = "";
-
- (new FSyncLockThread())->go();
- while ( ! locked && err.size() == 0 ) {
- _threadSync.wait( m );
- }
-
- if ( err.size() ){
- errmsg = err;
- return false;
- }
-
- log() << "db is now locked, no writes allowed. db.fsyncUnlock() to unlock" << endl;
- log() << " For more info see " << FSyncCommand::url() << endl;
- result.append("info", "now locked against writes, use db.fsyncUnlock() to unlock");
- result.append("seeAlso", FSyncCommand::url());
+ stdx::lock_guard<SimpleMutex> lk(m);
+ err = "";
+ (new FSyncLockThread())->go();
+ while (!locked && err.size() == 0) {
+ _threadSync.wait(m);
}
- else {
- // the simple fsync command case
- if (sync) {
- // can this be GlobalRead? and if it can, it should be nongreedy.
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite w(txn->lockState());
- getDur().commitNow(txn);
-
- // No WriteUnitOfWork needed, as this does no writes of its own.
- }
-
- // Take a global IS lock to ensure the storage engine is not shut down
- Lock::GlobalLock global(txn->lockState(), MODE_IS, UINT_MAX);
- StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
- result.append( "numFiles" , storageEngine->flushAllFiles( sync ) );
- }
- return 1;
- }
- } fsyncCmd;
- namespace {
- bool unlockFsync();
- } // namespace
+ if (err.size()) {
+ errmsg = err;
+ return false;
+ }
- class FSyncUnlockCommand : public Command {
- public:
+ log() << "db is now locked, no writes allowed. db.fsyncUnlock() to unlock" << endl;
+ log() << " For more info see " << FSyncCommand::url() << endl;
+ result.append("info", "now locked against writes, use db.fsyncUnlock() to unlock");
+ result.append("seeAlso", FSyncCommand::url());
- FSyncUnlockCommand() : Command("fsyncUnlock") {}
+ } else {
+ // the simple fsync command case
+ if (sync) {
+ // can this be GlobalRead? and if it can, it should be nongreedy.
+ ScopedTransaction transaction(txn, MODE_X);
+ Lock::GlobalWrite w(txn->lockState());
+ getDur().commitNow(txn);
- bool isWriteCommandForConfigServer() const override { return false; }
+ // No WriteUnitOfWork needed, as this does no writes of its own.
+ }
- bool slaveOk() const override { return true; }
+ // Take a global IS lock to ensure the storage engine is not shut down
+ Lock::GlobalLock global(txn->lockState(), MODE_IS, UINT_MAX);
+ StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
+ result.append("numFiles", storageEngine->flushAllFiles(sync));
+ }
+ return 1;
+ }
+} fsyncCmd;
- bool adminOnly() const override { return true; }
+namespace {
+bool unlockFsync();
+} // namespace
- Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) override {
+class FSyncUnlockCommand : public Command {
+public:
+ FSyncUnlockCommand() : Command("fsyncUnlock") {}
- bool isAuthorized = AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forClusterResource(),
- ActionType::unlock);
+ bool isWriteCommandForConfigServer() const override {
+ return false;
+ }
- return isAuthorized ? Status::OK() : Status(ErrorCodes::Unauthorized, "Unauthorized");
- }
+ bool slaveOk() const override {
+ return true;
+ }
- bool run(OperationContext* txn,
- const std::string& db,
- BSONObj& cmdObj,
- int options,
- std::string& errmsg,
- BSONObjBuilder& result) override {
+ bool adminOnly() const override {
+ return true;
+ }
- log() << "command: unlock requested";
+ Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) override {
+ bool isAuthorized = AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
+ ResourcePattern::forClusterResource(), ActionType::unlock);
- if (unlockFsync()) {
- result.append("info", "unlock completed");
- return true;
- }
- else {
- errmsg = "not locked";
- return false;
- }
- }
+ return isAuthorized ? Status::OK() : Status(ErrorCodes::Unauthorized, "Unauthorized");
+ }
- } unlockFsyncCmd;
+ bool run(OperationContext* txn,
+ const std::string& db,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result) override {
+ log() << "command: unlock requested";
- SimpleMutex filesLockedFsync;
+ if (unlockFsync()) {
+ result.append("info", "unlock completed");
+ return true;
+ } else {
+ errmsg = "not locked";
+ return false;
+ }
+ }
- void FSyncLockThread::doRealWork() {
- stdx::lock_guard<SimpleMutex> lkf(filesLockedFsync);
+} unlockFsyncCmd;
- OperationContextImpl txn;
- ScopedTransaction transaction(&txn, MODE_X);
- Lock::GlobalWrite global(txn.lockState()); // No WriteUnitOfWork needed
+SimpleMutex filesLockedFsync;
- stdx::lock_guard<SimpleMutex> lk(fsyncCmd.m);
+void FSyncLockThread::doRealWork() {
+ stdx::lock_guard<SimpleMutex> lkf(filesLockedFsync);
- invariant(!fsyncCmd.locked); // impossible to get here if locked is true
- try {
- getDur().syncDataAndTruncateJournal(&txn);
- }
- catch( std::exception& e ) {
- error() << "error doing syncDataAndTruncateJournal: " << e.what() << endl;
- fsyncCmd.err = e.what();
- fsyncCmd._threadSync.notify_one();
- fsyncCmd.locked = false;
- return;
- }
+ OperationContextImpl txn;
+ ScopedTransaction transaction(&txn, MODE_X);
+ Lock::GlobalWrite global(txn.lockState()); // No WriteUnitOfWork needed
- txn.lockState()->downgradeGlobalXtoSForMMAPV1();
+ stdx::lock_guard<SimpleMutex> lk(fsyncCmd.m);
- try {
- StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
- storageEngine->flushAllFiles(true);
- }
- catch( std::exception& e ) {
- error() << "error doing flushAll: " << e.what() << endl;
- fsyncCmd.err = e.what();
- fsyncCmd._threadSync.notify_one();
- fsyncCmd.locked = false;
- return;
- }
+ invariant(!fsyncCmd.locked); // impossible to get here if locked is true
+ try {
+ getDur().syncDataAndTruncateJournal(&txn);
+ } catch (std::exception& e) {
+ error() << "error doing syncDataAndTruncateJournal: " << e.what() << endl;
+ fsyncCmd.err = e.what();
+ fsyncCmd._threadSync.notify_one();
+ fsyncCmd.locked = false;
+ return;
+ }
- invariant(!fsyncCmd.locked);
- fsyncCmd.locked = true;
+ txn.lockState()->downgradeGlobalXtoSForMMAPV1();
+ try {
+ StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
+ storageEngine->flushAllFiles(true);
+ } catch (std::exception& e) {
+ error() << "error doing flushAll: " << e.what() << endl;
+ fsyncCmd.err = e.what();
fsyncCmd._threadSync.notify_one();
+ fsyncCmd.locked = false;
+ return;
+ }
- while ( ! fsyncCmd.pendingUnlock ) {
- fsyncCmd._unlockSync.wait(fsyncCmd.m);
- }
- fsyncCmd.pendingUnlock = false;
+ invariant(!fsyncCmd.locked);
+ fsyncCmd.locked = true;
- fsyncCmd.locked = false;
- fsyncCmd.err = "unlocked";
+ fsyncCmd._threadSync.notify_one();
- fsyncCmd._unlockSync.notify_one();
+ while (!fsyncCmd.pendingUnlock) {
+ fsyncCmd._unlockSync.wait(fsyncCmd.m);
}
+ fsyncCmd.pendingUnlock = false;
+
+ fsyncCmd.locked = false;
+ fsyncCmd.err = "unlocked";
+
+ fsyncCmd._unlockSync.notify_one();
+}
+
+bool lockedForWriting() {
+ return fsyncCmd.locked;
+}
- bool lockedForWriting() {
- return fsyncCmd.locked;
+namespace {
+// @return true if unlocked
+bool unlockFsync() {
+ stdx::lock_guard<SimpleMutex> lk(fsyncCmd.m);
+ if (!fsyncCmd.locked) {
+ return false;
}
-
- namespace {
- // @return true if unlocked
- bool unlockFsync() {
- stdx::lock_guard<SimpleMutex> lk( fsyncCmd.m );
- if( !fsyncCmd.locked ) {
- return false;
- }
- fsyncCmd.pendingUnlock = true;
- fsyncCmd._unlockSync.notify_one();
- fsyncCmd._threadSync.notify_one();
+ fsyncCmd.pendingUnlock = true;
+ fsyncCmd._unlockSync.notify_one();
+ fsyncCmd._threadSync.notify_one();
- while ( fsyncCmd.locked ) {
- fsyncCmd._unlockSync.wait( fsyncCmd.m );
- }
- return true;
- }
- } // namespace
+ while (fsyncCmd.locked) {
+ fsyncCmd._unlockSync.wait(fsyncCmd.m);
+ }
+ return true;
+}
+} // namespace
}
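The fsync/fsyncUnlock pair above coordinates through two condition variables: FSyncLockThread flushes under the global write lock, sets locked and signals _threadSync, then parks until unlockFsync raises pendingUnlock and waits for it to stand down. A minimal, self-contained sketch of that handshake, using plain std:: primitives in place of SimpleMutex and the stdx wrappers:

    #include <condition_variable>
    #include <iostream>
    #include <mutex>
    #include <thread>

    std::mutex m;
    std::condition_variable threadSync, unlockSync;
    bool locked = false, pendingUnlock = false;

    // Plays the role of FSyncLockThread::doRealWork().
    void lockThread() {
        std::unique_lock<std::mutex> lk(m);
        // ... flush files while holding the global write lock ...
        locked = true;
        threadSync.notify_one();  // the fsync command can now return
        unlockSync.wait(lk, [] { return pendingUnlock; });
        pendingUnlock = false;
        locked = false;
        unlockSync.notify_one();  // unlockFsync can now return
    }

    int main() {
        std::thread t(lockThread);
        {
            std::unique_lock<std::mutex> lk(m);
            threadSync.wait(lk, [] { return locked; });  // fsync {lock: true}
            std::cout << "locked against writes\n";
            pendingUnlock = true;                        // fsyncUnlock
            unlockSync.notify_one();
            unlockSync.wait(lk, [] { return !locked; });
            std::cout << "unlocked\n";
        }
        t.join();
    }

Two condition variables are used because the two waits have different predicates: the command thread waits for the lock to be taken, while the unlock path waits for it to be released.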
diff --git a/src/mongo/db/commands/fsync.h b/src/mongo/db/commands/fsync.h
index 4072a1f6e50..1442a627d00 100644
--- a/src/mongo/db/commands/fsync.h
+++ b/src/mongo/db/commands/fsync.h
@@ -31,7 +31,7 @@
#include "mongo/util/concurrency/mutex.h"
namespace mongo {
- // Use this for blocking during an fsync-and-lock
- extern SimpleMutex filesLockedFsync;
- bool lockedForWriting();
+// Use this for blocking during an fsync-and-lock
+extern SimpleMutex filesLockedFsync;
+bool lockedForWriting();
} // namespace mongo
diff --git a/src/mongo/db/commands/geo_near_cmd.cpp b/src/mongo/db/commands/geo_near_cmd.cpp
index 6f44749925d..5be50f433f6 100644
--- a/src/mongo/db/commands/geo_near_cmd.cpp
+++ b/src/mongo/db/commands/geo_near_cmd.cpp
@@ -53,271 +53,276 @@
namespace mongo {
- using std::unique_ptr;
- using std::stringstream;
-
- class Geo2dFindNearCmd : public Command {
- public:
- Geo2dFindNearCmd() : Command("geoNear") {}
-
- virtual bool isWriteCommandForConfigServer() const { return false; }
- bool slaveOk() const { return true; }
- bool slaveOverrideOk() const { return true; }
-
- void help(stringstream& h) const {
- h << "http://dochub.mongodb.org/core/geo#GeospatialIndexing-geoNearCommand";
- }
-
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::find);
- out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
+using std::unique_ptr;
+using std::stringstream;
+
+class Geo2dFindNearCmd : public Command {
+public:
+ Geo2dFindNearCmd() : Command("geoNear") {}
+
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ bool slaveOk() const {
+ return true;
+ }
+ bool slaveOverrideOk() const {
+ return true;
+ }
+
+ void help(stringstream& h) const {
+ h << "http://dochub.mongodb.org/core/geo#GeospatialIndexing-geoNearCommand";
+ }
+
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::find);
+ out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
+ }
+
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ if (!cmdObj["start"].eoo()) {
+ errmsg = "using deprecated 'start' argument to geoNear";
+ return false;
}
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- if (!cmdObj["start"].eoo()) {
- errmsg = "using deprecated 'start' argument to geoNear";
- return false;
- }
-
- const NamespaceString nss(parseNs(dbname, cmdObj));
- AutoGetCollectionForRead ctx(txn, nss);
+ const NamespaceString nss(parseNs(dbname, cmdObj));
+ AutoGetCollectionForRead ctx(txn, nss);
- Collection* collection = ctx.getCollection();
- if ( !collection ) {
- errmsg = "can't find ns";
- return false;
- }
-
- IndexCatalog* indexCatalog = collection->getIndexCatalog();
-
- // cout << "raw cmd " << cmdObj.toString() << endl;
-
- // We seek to populate this.
- string nearFieldName;
- bool using2DIndex = false;
- if (!getFieldName(txn, collection, indexCatalog, &nearFieldName, &errmsg, &using2DIndex)) {
- return false;
- }
-
- PointWithCRS point;
- uassert(17304, "'near' field must be point",
- GeoParser::parseQueryPoint(cmdObj["near"], &point).isOK());
-
- bool isSpherical = cmdObj["spherical"].trueValue();
- if (!using2DIndex) {
- uassert(17301, "2dsphere index must have spherical: true", isSpherical);
- }
-
- // Build the $near expression for the query.
- BSONObjBuilder nearBob;
- if (isSpherical) {
- nearBob.append("$nearSphere", cmdObj["near"].Obj());
- }
- else {
- nearBob.append("$near", cmdObj["near"].Obj());
- }
+ Collection* collection = ctx.getCollection();
+ if (!collection) {
+ errmsg = "can't find ns";
+ return false;
+ }
- if (!cmdObj["maxDistance"].eoo()) {
- uassert(17299, "maxDistance must be a number",cmdObj["maxDistance"].isNumber());
- nearBob.append("$maxDistance", cmdObj["maxDistance"].number());
- }
+ IndexCatalog* indexCatalog = collection->getIndexCatalog();
- if (!cmdObj["minDistance"].eoo()) {
- uassert(17298, "minDistance doesn't work on 2d index", !using2DIndex);
- uassert(17300, "minDistance must be a number",cmdObj["minDistance"].isNumber());
- nearBob.append("$minDistance", cmdObj["minDistance"].number());
- }
+ // cout << "raw cmd " << cmdObj.toString() << endl;
- if (!cmdObj["uniqueDocs"].eoo()) {
- warning() << nss << ": ignoring deprecated uniqueDocs option in geoNear command";
- }
+ // We seek to populate this.
+ string nearFieldName;
+ bool using2DIndex = false;
+ if (!getFieldName(txn, collection, indexCatalog, &nearFieldName, &errmsg, &using2DIndex)) {
+ return false;
+ }
- // And, build the full query expression.
- BSONObjBuilder queryBob;
- queryBob.append(nearFieldName, nearBob.obj());
- if (!cmdObj["query"].eoo() && cmdObj["query"].isABSONObj()) {
- queryBob.appendElements(cmdObj["query"].Obj());
- }
- BSONObj rewritten = queryBob.obj();
+ PointWithCRS point;
+ uassert(17304,
+ "'near' field must be point",
+ GeoParser::parseQueryPoint(cmdObj["near"], &point).isOK());
- // cout << "rewritten query: " << rewritten.toString() << endl;
+ bool isSpherical = cmdObj["spherical"].trueValue();
+ if (!using2DIndex) {
+ uassert(17301, "2dsphere index must have spherical: true", isSpherical);
+ }
- long long numWanted = 100;
- const char* limitName = !cmdObj["num"].eoo() ? "num" : "limit";
- BSONElement eNumWanted = cmdObj[limitName];
- if (!eNumWanted.eoo()) {
- uassert(17303, "limit must be number", eNumWanted.isNumber());
- numWanted = eNumWanted.safeNumberLong();
- uassert(17302, "limit must be >=0", numWanted >= 0);
- }
+ // Build the $near expression for the query.
+ BSONObjBuilder nearBob;
+ if (isSpherical) {
+ nearBob.append("$nearSphere", cmdObj["near"].Obj());
+ } else {
+ nearBob.append("$near", cmdObj["near"].Obj());
+ }
- bool includeLocs = false;
- if (!cmdObj["includeLocs"].eoo()) {
- includeLocs = cmdObj["includeLocs"].trueValue();
- }
+ if (!cmdObj["maxDistance"].eoo()) {
+ uassert(17299, "maxDistance must be a number", cmdObj["maxDistance"].isNumber());
+ nearBob.append("$maxDistance", cmdObj["maxDistance"].number());
+ }
- double distanceMultiplier = 1.0;
- BSONElement eDistanceMultiplier = cmdObj["distanceMultiplier"];
- if (!eDistanceMultiplier.eoo()) {
- uassert(17296, "distanceMultiplier must be a number", eDistanceMultiplier.isNumber());
- distanceMultiplier = eDistanceMultiplier.number();
- uassert(17297, "distanceMultiplier must be non-negative", distanceMultiplier >= 0);
- }
+ if (!cmdObj["minDistance"].eoo()) {
+ uassert(17298, "minDistance doesn't work on 2d index", !using2DIndex);
+ uassert(17300, "minDistance must be a number", cmdObj["minDistance"].isNumber());
+ nearBob.append("$minDistance", cmdObj["minDistance"].number());
+ }
- BSONObj projObj = BSON("$pt" << BSON("$meta" << LiteParsedQuery::metaGeoNearPoint) <<
- "$dis" << BSON("$meta" << LiteParsedQuery::metaGeoNearDistance));
-
- CanonicalQuery* cq;
- const WhereCallbackReal whereCallback(txn, nss.db());
-
- if (!CanonicalQuery::canonicalize(nss,
- rewritten,
- BSONObj(),
- projObj,
- 0,
- numWanted,
- BSONObj(),
- &cq,
- whereCallback).isOK()) {
- errmsg = "Can't parse filter / create query";
- return false;
- }
+ if (!cmdObj["uniqueDocs"].eoo()) {
+ warning() << nss << ": ignoring deprecated uniqueDocs option in geoNear command";
+ }
- // Prevent chunks from being cleaned up during yields - this allows us to only check the
- // version on initial entry into geoNear.
- RangePreserver preserver(collection);
+ // And, build the full query expression.
+ BSONObjBuilder queryBob;
+ queryBob.append(nearFieldName, nearBob.obj());
+ if (!cmdObj["query"].eoo() && cmdObj["query"].isABSONObj()) {
+ queryBob.appendElements(cmdObj["query"].Obj());
+ }
+ BSONObj rewritten = queryBob.obj();
- PlanExecutor* rawExec;
- if (!getExecutor(txn, collection, cq, PlanExecutor::YIELD_AUTO, &rawExec, 0).isOK()) {
- errmsg = "can't get query executor";
- return false;
- }
+ // cout << "rewritten query: " << rewritten.toString() << endl;
- unique_ptr<PlanExecutor> exec(rawExec);
-
- double totalDistance = 0;
- BSONObjBuilder resultBuilder(result.subarrayStart("results"));
- double farthestDist = 0;
-
- BSONObj currObj;
- long long results = 0;
- while ((results < numWanted) && PlanExecutor::ADVANCED == exec->getNext(&currObj, NULL)) {
-
- // Come up with the correct distance.
- double dist = currObj["$dis"].number() * distanceMultiplier;
- totalDistance += dist;
- if (dist > farthestDist) { farthestDist = dist; }
-
- // Strip out '$dis' and '$pt' from the result obj. The rest gets added as 'obj'
- // in the command result.
- BSONObjIterator resIt(currObj);
- BSONObjBuilder resBob;
- while (resIt.more()) {
- BSONElement elt = resIt.next();
- if (!mongoutils::str::equals("$pt", elt.fieldName())
- && !mongoutils::str::equals("$dis", elt.fieldName())) {
- resBob.append(elt);
- }
- }
- BSONObj resObj = resBob.obj();
+ long long numWanted = 100;
+ const char* limitName = !cmdObj["num"].eoo() ? "num" : "limit";
+ BSONElement eNumWanted = cmdObj[limitName];
+ if (!eNumWanted.eoo()) {
+ uassert(17303, "limit must be number", eNumWanted.isNumber());
+ numWanted = eNumWanted.safeNumberLong();
+ uassert(17302, "limit must be >=0", numWanted >= 0);
+ }
- // Don't make a too-big result object.
- if (resultBuilder.len() + resObj.objsize()> BSONObjMaxUserSize) {
- warning() << "Too many geoNear results for query " << rewritten.toString()
- << ", truncating output.";
- break;
- }
+ bool includeLocs = false;
+ if (!cmdObj["includeLocs"].eoo()) {
+ includeLocs = cmdObj["includeLocs"].trueValue();
+ }
- // Add the next result to the result builder.
- BSONObjBuilder oneResultBuilder(
- resultBuilder.subobjStart(BSONObjBuilder::numStr(results)));
- oneResultBuilder.append("dis", dist);
- if (includeLocs) {
- oneResultBuilder.appendAs(currObj["$pt"], "loc");
- }
- oneResultBuilder.append("obj", resObj);
- oneResultBuilder.done();
- ++results;
- }
+ double distanceMultiplier = 1.0;
+ BSONElement eDistanceMultiplier = cmdObj["distanceMultiplier"];
+ if (!eDistanceMultiplier.eoo()) {
+ uassert(17296, "distanceMultiplier must be a number", eDistanceMultiplier.isNumber());
+ distanceMultiplier = eDistanceMultiplier.number();
+ uassert(17297, "distanceMultiplier must be non-negative", distanceMultiplier >= 0);
+ }
- resultBuilder.done();
+ BSONObj projObj = BSON("$pt" << BSON("$meta" << LiteParsedQuery::metaGeoNearPoint) << "$dis"
+ << BSON("$meta" << LiteParsedQuery::metaGeoNearDistance));
- // Fill out the stats subobj.
- BSONObjBuilder stats(result.subobjStart("stats"));
+ CanonicalQuery* cq;
+ const WhereCallbackReal whereCallback(txn, nss.db());
- // Fill in nscanned from the explain.
- PlanSummaryStats summary;
- Explain::getSummaryStats(exec.get(), &summary);
- stats.appendNumber("nscanned", summary.totalKeysExamined);
- stats.appendNumber("objectsLoaded", summary.totalDocsExamined);
+ if (!CanonicalQuery::canonicalize(
+ nss, rewritten, BSONObj(), projObj, 0, numWanted, BSONObj(), &cq, whereCallback)
+ .isOK()) {
+ errmsg = "Can't parse filter / create query";
+ return false;
+ }
- stats.append("avgDistance", totalDistance / results);
- stats.append("maxDistance", farthestDist);
- stats.append("time", CurOp::get(txn)->elapsedMillis());
- stats.done();
+ // Prevent chunks from being cleaned up during yields - this allows us to only check the
+ // version on initial entry into geoNear.
+ RangePreserver preserver(collection);
- return true;
+ PlanExecutor* rawExec;
+ if (!getExecutor(txn, collection, cq, PlanExecutor::YIELD_AUTO, &rawExec, 0).isOK()) {
+ errmsg = "can't get query executor";
+ return false;
}
- private:
- bool getFieldName(OperationContext* txn, Collection* collection, IndexCatalog* indexCatalog,
- string* fieldOut, string* errOut, bool *isFrom2D) {
- vector<IndexDescriptor*> idxs;
+ unique_ptr<PlanExecutor> exec(rawExec);
- // First, try 2d.
- collection->getIndexCatalog()->findIndexByType(txn, IndexNames::GEO_2D, idxs);
- if (idxs.size() > 1) {
- *errOut = "more than one 2d index, not sure which to run geoNear on";
- return false;
+ double totalDistance = 0;
+ BSONObjBuilder resultBuilder(result.subarrayStart("results"));
+ double farthestDist = 0;
+
+ BSONObj currObj;
+ long long results = 0;
+ while ((results < numWanted) && PlanExecutor::ADVANCED == exec->getNext(&currObj, NULL)) {
+ // Come up with the correct distance.
+ double dist = currObj["$dis"].number() * distanceMultiplier;
+ totalDistance += dist;
+ if (dist > farthestDist) {
+ farthestDist = dist;
}
- if (1 == idxs.size()) {
- BSONObj indexKp = idxs[0]->keyPattern();
- BSONObjIterator kpIt(indexKp);
- while (kpIt.more()) {
- BSONElement elt = kpIt.next();
- if (String == elt.type() && IndexNames::GEO_2D == elt.valuestr()) {
- *fieldOut = elt.fieldName();
- *isFrom2D = true;
- return true;
- }
+ // Strip out '$dis' and '$pt' from the result obj. The rest gets added as 'obj'
+ // in the command result.
+ BSONObjIterator resIt(currObj);
+ BSONObjBuilder resBob;
+ while (resIt.more()) {
+ BSONElement elt = resIt.next();
+ if (!mongoutils::str::equals("$pt", elt.fieldName()) &&
+ !mongoutils::str::equals("$dis", elt.fieldName())) {
+ resBob.append(elt);
}
}
+ BSONObj resObj = resBob.obj();
- // Next, 2dsphere.
- idxs.clear();
- collection->getIndexCatalog()->findIndexByType(txn, IndexNames::GEO_2DSPHERE, idxs);
- if (0 == idxs.size()) {
- *errOut = "no geo indices for geoNear";
- return false;
+ // Don't make a too-big result object.
+ if (resultBuilder.len() + resObj.objsize() > BSONObjMaxUserSize) {
+ warning() << "Too many geoNear results for query " << rewritten.toString()
+ << ", truncating output.";
+ break;
}
- if (idxs.size() > 1) {
- *errOut = "more than one 2dsphere index, not sure which to run geoNear on";
- return false;
+ // Add the next result to the result builder.
+ BSONObjBuilder oneResultBuilder(
+ resultBuilder.subobjStart(BSONObjBuilder::numStr(results)));
+ oneResultBuilder.append("dis", dist);
+ if (includeLocs) {
+ oneResultBuilder.appendAs(currObj["$pt"], "loc");
}
+ oneResultBuilder.append("obj", resObj);
+ oneResultBuilder.done();
+ ++results;
+ }
- // 1 == idxs.size()
+ resultBuilder.done();
+
+ // Fill out the stats subobj.
+ BSONObjBuilder stats(result.subobjStart("stats"));
+
+ // Fill in nscanned from the explain.
+ PlanSummaryStats summary;
+ Explain::getSummaryStats(exec.get(), &summary);
+ stats.appendNumber("nscanned", summary.totalKeysExamined);
+ stats.appendNumber("objectsLoaded", summary.totalDocsExamined);
+
+ stats.append("avgDistance", totalDistance / results);
+ stats.append("maxDistance", farthestDist);
+ stats.append("time", CurOp::get(txn)->elapsedMillis());
+ stats.done();
+
+ return true;
+ }
+
+private:
+ bool getFieldName(OperationContext* txn,
+ Collection* collection,
+ IndexCatalog* indexCatalog,
+ string* fieldOut,
+ string* errOut,
+ bool* isFrom2D) {
+ vector<IndexDescriptor*> idxs;
+
+ // First, try 2d.
+ collection->getIndexCatalog()->findIndexByType(txn, IndexNames::GEO_2D, idxs);
+ if (idxs.size() > 1) {
+ *errOut = "more than one 2d index, not sure which to run geoNear on";
+ return false;
+ }
+
+ if (1 == idxs.size()) {
BSONObj indexKp = idxs[0]->keyPattern();
BSONObjIterator kpIt(indexKp);
while (kpIt.more()) {
BSONElement elt = kpIt.next();
- if (String == elt.type() && IndexNames::GEO_2DSPHERE == elt.valuestr()) {
+ if (String == elt.type() && IndexNames::GEO_2D == elt.valuestr()) {
*fieldOut = elt.fieldName();
- *isFrom2D = false;
+ *isFrom2D = true;
return true;
}
}
+ }
+ // Next, 2dsphere.
+ idxs.clear();
+ collection->getIndexCatalog()->findIndexByType(txn, IndexNames::GEO_2DSPHERE, idxs);
+ if (0 == idxs.size()) {
+ *errOut = "no geo indices for geoNear";
return false;
}
- } geo2dFindNearCmd;
+
+ if (idxs.size() > 1) {
+ *errOut = "more than one 2dsphere index, not sure which to run geoNear on";
+ return false;
+ }
+
+ // 1 == idxs.size()
+ BSONObj indexKp = idxs[0]->keyPattern();
+ BSONObjIterator kpIt(indexKp);
+ while (kpIt.more()) {
+ BSONElement elt = kpIt.next();
+ if (String == elt.type() && IndexNames::GEO_2DSPHERE == elt.valuestr()) {
+ *fieldOut = elt.fieldName();
+ *isFrom2D = false;
+ return true;
+ }
+ }
+
+ return false;
+ }
+} geo2dFindNearCmd;
} // namespace mongo
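geoNear above rewrites its options into an ordinary $near/$nearSphere predicate on the indexed geo field, merging any user-supplied "query" alongside it before canonicalizing. A minimal, self-contained sketch of that rewrite, emitting the filter as JSON text (rewriteGeoNear is a hypothetical helper; the real code builds BSON with BSONObjBuilder):

    #include <iostream>
    #include <sstream>
    #include <string>

    // Compose the find filter that a geoNear-style command reduces to.
    std::string rewriteGeoNear(const std::string& nearField,
                               const std::string& point,
                               bool spherical,
                               double maxDistance,
                               const std::string& userQuery) {
        std::ostringstream out;
        out << "{ \"" << nearField << "\": { \""
            << (spherical ? "$nearSphere" : "$near") << "\": " << point
            << ", \"$maxDistance\": " << maxDistance << " }"
            << (userQuery.empty() ? std::string() : ", " + userQuery) << " }";
        return out.str();
    }

    int main() {
        // {geoNear: ..., near: [-73.97, 40.77], spherical: true,
        //  maxDistance: 500, query: {category: "cafe"}}
        std::cout << rewriteGeoNear("loc", "[ -73.97, 40.77 ]", true, 500.0,
                                    "\"category\": \"cafe\"") << '\n';
    }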
diff --git a/src/mongo/db/commands/get_last_error.cpp b/src/mongo/db/commands/get_last_error.cpp
index ac80e1de823..2e5cd625086 100644
--- a/src/mongo/db/commands/get_last_error.cpp
+++ b/src/mongo/db/commands/get_last_error.cpp
@@ -44,264 +44,267 @@
namespace mongo {
- using std::string;
- using std::stringstream;
-
- /* reset any errors so that getlasterror comes back clean.
-
- useful before performing a long series of operations where we want to
- see if any of the operations triggered an error, but don't want to check
- after each op as that would be a client/server turnaround.
- */
- class CmdResetError : public Command {
- public:
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual bool slaveOk() const {
- return true;
- }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {} // No auth required
- virtual void help( stringstream& help ) const {
- help << "reset error state (used with getpreverror)";
+using std::string;
+using std::stringstream;
+
+/* reset any errors so that getlasterror comes back clean.
+
+ useful before performing a long series of operations where we want to
+ see if any of the operations triggered an error, but don't want to check
+ after each op as that would be a client/server turnaround.
+*/
+class CmdResetError : public Command {
+public:
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {} // No auth required
+ virtual void help(stringstream& help) const {
+ help << "reset error state (used with getpreverror)";
+ }
+ CmdResetError() : Command("resetError", false, "reseterror") {}
+ bool run(OperationContext* txn,
+ const string& db,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ LastError::get(txn->getClient()).reset();
+ return true;
+ }
+} cmdResetError;
+
+class CmdGetLastError : public Command {
+public:
+ CmdGetLastError() : Command("getLastError", false, "getlasterror") {}
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {} // No auth required
+ virtual void help(stringstream& help) const {
+ LastError::get(cc()).disable(); // SERVER-11492
+ help << "return error status of the last operation on this connection\n"
+ << "options:\n"
+ << " { fsync:true } - fsync before returning, or wait for journal commit if running "
+ "with --journal\n"
+ << " { j:true } - wait for journal commit if running with --journal\n"
+ << " { w:n } - await replication to n servers (including self) before returning\n"
+ << " { w:'majority' } - await replication to majority of set\n"
+ << " { wtimeout:m} - timeout for w in m milliseconds";
+ }
+
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ //
+ // Correct behavior here is very finicky.
+ //
+ // 1. The first step is to append the error that occurred on the previous operation.
+ // This adds an "err" field to the command, which is *not* the command failing.
+ //
+ // 2. Next we parse and validate write concern options. If these options are invalid
+ // the command fails no matter what, even if we actually had an error earlier. The
+ // reason for checking here is to match legacy behavior on these kinds of failures -
+ // we'll still get an "err" field for the write error.
+ //
+ // 3. If we had an error on the previous operation, we then return immediately.
+ //
+ // 4. Finally, we actually enforce the write concern. All errors *except* timeout are
+ // reported with ok : 0.0, to match legacy behavior.
+ //
+ // There is a special case when "wOpTime" and "wElectionId" are explicitly provided by
+ // the client (mongos) - in this case we *only* enforce the write concern if it is
+ // valid.
+ //
+ // We always need to either report "err" (if ok : 1) or "errmsg" (if ok : 0), even if
+ // err is null.
+ //
+
+ LastError* le = &LastError::get(txn->getClient());
+ le->disable();
+
+ // Always append lastOp and connectionId
+ Client& c = *txn->getClient();
+ if (repl::getGlobalReplicationCoordinator()->getReplicationMode() ==
+ repl::ReplicationCoordinator::modeReplSet) {
+ const repl::OpTime lastOp = repl::ReplClientInfo::forClient(c).getLastOp();
+ if (!lastOp.isNull()) {
+ result.append("lastOp", lastOp.getTimestamp());
+ // TODO(siyuan) Add "lastOpTerm"
+ }
}
- CmdResetError() : Command("resetError", false, "reseterror") {}
- bool run(OperationContext* txn,
- const string& db,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- LastError::get(txn->getClient()).reset();
- return true;
+
+ // for sharding; also useful in general for debugging
+ result.appendNumber("connectionId", c.getConnectionId());
+
+ Timestamp lastTimestamp;
+ BSONField<Timestamp> wOpTimeField("wOpTime");
+ FieldParser::FieldState extracted =
+ FieldParser::extract(cmdObj, wOpTimeField, &lastTimestamp, &errmsg);
+ if (!extracted) {
+ result.append("badGLE", cmdObj);
+ appendCommandStatus(result, false, errmsg);
+ return false;
}
- } cmdResetError;
-
- class CmdGetLastError : public Command {
- public:
- CmdGetLastError() : Command("getLastError", false, "getlasterror") { }
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual bool slaveOk() const { return true; }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {} // No auth required
- virtual void help( stringstream& help ) const {
- LastError::get(cc()).disable(); // SERVER-11492
- help << "return error status of the last operation on this connection\n"
- << "options:\n"
- << " { fsync:true } - fsync before returning, or wait for journal commit if running with --journal\n"
- << " { j:true } - wait for journal commit if running with --journal\n"
- << " { w:n } - await replication to n servers (including self) before returning\n"
- << " { w:'majority' } - await replication to majority of set\n"
- << " { wtimeout:m} - timeout for w in m milliseconds";
+
+ repl::OpTime lastOpTime;
+ bool lastOpTimePresent = extracted != FieldParser::FIELD_NONE;
+ if (!lastOpTimePresent) {
+ // Use the client opTime if no wOpTime is specified
+ lastOpTime = repl::ReplClientInfo::forClient(c).getLastOp();
+ // TODO(siyuan) Fix mongos to supply wOpTimeTerm, then parse out that value here
+ } else {
+ // TODO(siyuan) Don't use the default term after fixing mongos.
+ lastOpTime = repl::OpTime(lastTimestamp, repl::OpTime::kDefaultTerm);
}
- bool run(OperationContext* txn, const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
-
- //
- // Correct behavior here is very finicky.
- //
- // 1. The first step is to append the error that occurred on the previous operation.
- // This adds an "err" field to the command, which is *not* the command failing.
- //
- // 2. Next we parse and validate write concern options. If these options are invalid
- // the command fails no matter what, even if we actually had an error earlier. The
- // reason for checking here is to match legacy behavior on these kinds of failures -
- // we'll still get an "err" field for the write error.
- //
- // 3. If we had an error on the previous operation, we then return immediately.
- //
- // 4. Finally, we actually enforce the write concern. All errors *except* timeout are
- // reported with ok : 0.0, to match legacy behavior.
- //
- // There is a special case when "wOpTime" and "wElectionId" are explicitly provided by
- // the client (mongos) - in this case we *only* enforce the write concern if it is
- // valid.
- //
- // We always need to either report "err" (if ok : 1) or "errmsg" (if ok : 0), even if
- // err is null.
- //
-
- LastError *le = &LastError::get(txn->getClient());
- le->disable();
-
- // Always append lastOp and connectionId
- Client& c = *txn->getClient();
- if (repl::getGlobalReplicationCoordinator()->getReplicationMode() ==
- repl::ReplicationCoordinator::modeReplSet) {
- const repl::OpTime lastOp = repl::ReplClientInfo::forClient(c).getLastOp();
- if (!lastOp.isNull()) {
- result.append("lastOp", lastOp.getTimestamp());
- // TODO(siyuan) Add "lastOpTerm"
- }
- }
+ OID electionId;
+ BSONField<OID> wElectionIdField("wElectionId");
+ extracted = FieldParser::extract(cmdObj, wElectionIdField, &electionId, &errmsg);
+ if (!extracted) {
+ result.append("badGLE", cmdObj);
+ appendCommandStatus(result, false, errmsg);
+ return false;
+ }
- // for sharding; also useful in general for debugging
- result.appendNumber( "connectionId" , c.getConnectionId() );
-
- Timestamp lastTimestamp;
- BSONField<Timestamp> wOpTimeField("wOpTime");
- FieldParser::FieldState extracted = FieldParser::extract(cmdObj, wOpTimeField,
- &lastTimestamp, &errmsg);
- if (!extracted) {
- result.append("badGLE", cmdObj);
- appendCommandStatus(result, false, errmsg);
- return false;
- }
+ bool electionIdPresent = extracted != FieldParser::FIELD_NONE;
+ bool errorOccurred = false;
- repl::OpTime lastOpTime;
- bool lastOpTimePresent = extracted != FieldParser::FIELD_NONE;
- if (!lastOpTimePresent) {
- // Use the client opTime if no wOpTime is specified
- lastOpTime = repl::ReplClientInfo::forClient(c).getLastOp();
- // TODO(siyuan) Fix mongos to supply wOpTimeTerm, then parse out that value here
+ // Errors aren't reported when wOpTime is used
+ if (!lastOpTimePresent) {
+ if (le->getNPrev() != 1) {
+ errorOccurred = LastError::noError.appendSelf(result, false);
} else {
- // TODO(siyuan) Don't use the default term after fixing mongos.
- lastOpTime = repl::OpTime(lastTimestamp, repl::OpTime::kDefaultTerm);
- }
-
- OID electionId;
- BSONField<OID> wElectionIdField("wElectionId");
- extracted = FieldParser::extract(cmdObj, wElectionIdField,
- &electionId, &errmsg);
- if (!extracted) {
- result.append("badGLE", cmdObj);
- appendCommandStatus(result, false, errmsg);
- return false;
+ errorOccurred = le->appendSelf(result, false);
}
+ }
- bool electionIdPresent = extracted != FieldParser::FIELD_NONE;
- bool errorOccurred = false;
-
- // Errors aren't reported when wOpTime is used
- if ( !lastOpTimePresent ) {
- if ( le->getNPrev() != 1 ) {
- errorOccurred = LastError::noError.appendSelf( result, false );
- }
- else {
- errorOccurred = le->appendSelf( result, false );
- }
- }
+ BSONObj writeConcernDoc = cmdObj;
+ // Use the default options if we have no gle options aside from wOpTime/wElectionId
+ const int nFields = cmdObj.nFields();
+ bool useDefaultGLEOptions = (nFields == 1) || (nFields == 2 && lastOpTimePresent) ||
+ (nFields == 3 && lastOpTimePresent && electionIdPresent);
- BSONObj writeConcernDoc = cmdObj;
- // Use the default options if we have no gle options aside from wOpTime/wElectionId
- const int nFields = cmdObj.nFields();
- bool useDefaultGLEOptions = (nFields == 1) ||
- (nFields == 2 && lastOpTimePresent) ||
- (nFields == 3 && lastOpTimePresent && electionIdPresent);
+ WriteConcernOptions writeConcern;
- WriteConcernOptions writeConcern;
+ if (useDefaultGLEOptions) {
+ writeConcern = repl::getGlobalReplicationCoordinator()->getGetLastErrorDefault();
+ }
- if (useDefaultGLEOptions) {
- writeConcern = repl::getGlobalReplicationCoordinator()->getGetLastErrorDefault();
- }
+ Status status = writeConcern.parse(writeConcernDoc);
- Status status = writeConcern.parse( writeConcernDoc );
+ //
+ // Validate write concern no matter what; this matches 2.4 behavior
+ //
- //
- // Validate write concern no matter what; this matches 2.4 behavior
- //
+ if (status.isOK()) {
+ // Ensure options are valid for this host
+ status = validateWriteConcern(writeConcern);
+ }
- if ( status.isOK() ) {
- // Ensure options are valid for this host
- status = validateWriteConcern( writeConcern );
- }
+ if (!status.isOK()) {
+ result.append("badGLE", writeConcernDoc);
+ return appendCommandStatus(result, status);
+ }
- if ( !status.isOK() ) {
- result.append( "badGLE", writeConcernDoc );
- return appendCommandStatus( result, status );
- }
+ // Don't wait for replication if there was an error reported - this matches 2.4 behavior
+ if (errorOccurred) {
+ dassert(!lastOpTimePresent);
+ return true;
+ }
- // Don't wait for replication if there was an error reported - this matches 2.4 behavior
- if ( errorOccurred ) {
- dassert( !lastOpTimePresent );
- return true;
- }
+ // No error occurred, so we won't duplicate these fields with write concern errors
+ dassert(result.asTempObj()["err"].eoo());
+ dassert(result.asTempObj()["code"].eoo());
- // No error occurred, so we won't duplicate these fields with write concern errors
- dassert( result.asTempObj()["err"].eoo() );
- dassert( result.asTempObj()["code"].eoo() );
-
- // If we got an electionId, make sure it matches
- if (electionIdPresent) {
- if (repl::getGlobalReplicationCoordinator()->getReplicationMode() !=
- repl::ReplicationCoordinator::modeReplSet) {
- // Ignore electionIds of 0 from mongos.
- if (electionId != OID()) {
- errmsg = "wElectionId passed but no replication active";
- result.append("code", ErrorCodes::BadValue);
- return false;
- }
- }
- else {
- if (electionId != repl::getGlobalReplicationCoordinator()->getElectionId()) {
- LOG(3) << "oid passed in is " << electionId
- << ", but our id is "
- << repl::getGlobalReplicationCoordinator()->getElectionId();
- errmsg = "election occurred after write";
- result.append("code", ErrorCodes::WriteConcernFailed);
- return false;
- }
+ // If we got an electionId, make sure it matches
+ if (electionIdPresent) {
+ if (repl::getGlobalReplicationCoordinator()->getReplicationMode() !=
+ repl::ReplicationCoordinator::modeReplSet) {
+ // Ignore electionIds of 0 from mongos.
+ if (electionId != OID()) {
+ errmsg = "wElectionId passed but no replication active";
+ result.append("code", ErrorCodes::BadValue);
+ return false;
+ }
+ } else {
+ if (electionId != repl::getGlobalReplicationCoordinator()->getElectionId()) {
+ LOG(3) << "oid passed in is " << electionId << ", but our id is "
+ << repl::getGlobalReplicationCoordinator()->getElectionId();
+ errmsg = "election occurred after write";
+ result.append("code", ErrorCodes::WriteConcernFailed);
+ return false;
}
}
+ }
- txn->setWriteConcern(writeConcern);
- {
- stdx::lock_guard<Client> lk(*txn->getClient());
- txn->setMessage_inlock( "waiting for write concern" );
- }
-
- WriteConcernResult wcResult;
- status = waitForWriteConcern( txn, lastOpTime, &wcResult );
- wcResult.appendTo( writeConcern, &result );
-
- // For backward compatibility with 2.4, wtimeout returns ok : 1.0
- if ( wcResult.wTimedOut ) {
- dassert( !wcResult.err.empty() ); // so we always report err
- dassert( !status.isOK() );
- result.append( "errmsg", "timed out waiting for slaves" );
- result.append( "code", status.code() );
- return true;
- }
-
- return appendCommandStatus( result, status );
+ txn->setWriteConcern(writeConcern);
+ {
+ stdx::lock_guard<Client> lk(*txn->getClient());
+ txn->setMessage_inlock("waiting for write concern");
}
- } cmdGetLastError;
+ WriteConcernResult wcResult;
+ status = waitForWriteConcern(txn, lastOpTime, &wcResult);
+ wcResult.appendTo(writeConcern, &result);
- class CmdGetPrevError : public Command {
- public:
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual void help( stringstream& help ) const {
- help << "check for errors since last reseterror commandcal";
- }
- virtual bool slaveOk() const {
- return true;
- }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {} // No auth required
- CmdGetPrevError() : Command("getPrevError", false, "getpreverror") {}
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- LastError *le = &LastError::get(txn->getClient());
- le->disable();
- le->appendSelf(result, true);
- if (le->isValid())
- result.append("nPrev", le->getNPrev());
- else
- result.append("nPrev", -1);
+ // For backward compatibility with 2.4, wtimeout returns ok : 1.0
+ if (wcResult.wTimedOut) {
+ dassert(!wcResult.err.empty()); // so we always report err
+ dassert(!status.isOK());
+ result.append("errmsg", "timed out waiting for slaves");
+ result.append("code", status.code());
return true;
}
- } cmdGetPrevError;
+ return appendCommandStatus(result, status);
+ }
+
+} cmdGetLastError;
+
+class CmdGetPrevError : public Command {
+public:
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual void help(stringstream& help) const {
+ help << "check for errors since last reseterror commandcal";
+ }
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {} // No auth required
+ CmdGetPrevError() : Command("getPrevError", false, "getpreverror") {}
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ LastError* le = &LastError::get(txn->getClient());
+ le->disable();
+ le->appendSelf(result, true);
+ if (le->isValid())
+ result.append("nPrev", le->getNPrev());
+ else
+ result.append("nPrev", -1);
+ return true;
+ }
+} cmdGetPrevError;
}
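As a rough sketch of how this getLastError path can be driven in-process (DBDirectClient and the BSON builder come from the surrounding tree; the database name, write concern values, and the exact response fields shown are illustrative assumptions):

    #include "mongo/db/dbdirectclient.h"

    // txn is the OperationContext* of the current operation; "test" is a
    // placeholder database name.
    DBDirectClient client(txn);
    BSONObj res;
    client.runCommand("test", BSON("getLastError" << 1 << "w" << 2 << "wtimeout" << 5000), res);
    // Typical shapes, per the logic above (illustrative):
    //   no prior error: { ok: 1, err: null, connectionId: <n>, ... }
    //   wtimeout:       { ok: 1, errmsg: "timed out waiting for slaves", code: <n>, ... }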
diff --git a/src/mongo/db/commands/getmore_cmd.cpp b/src/mongo/db/commands/getmore_cmd.cpp
index e075fbd047e..23805bf1123 100644
--- a/src/mongo/db/commands/getmore_cmd.cpp
+++ b/src/mongo/db/commands/getmore_cmd.cpp
@@ -57,351 +57,363 @@
namespace mongo {
- /**
- * A command for running getMore() against an existing cursor registered with a CursorManager.
- * Used to generate the next batch of results for a ClientCursor.
- *
- * Can be used in combination with any cursor-generating command (e.g. find, aggregate,
- * listIndexes).
- */
- class GetMoreCmd : public Command {
- MONGO_DISALLOW_COPYING(GetMoreCmd);
- public:
- GetMoreCmd() : Command("getMore") { }
+/**
+ * A command for running getMore() against an existing cursor registered with a CursorManager.
+ * Used to generate the next batch of results for a ClientCursor.
+ *
+ * Can be used in combination with any cursor-generating command (e.g. find, aggregate,
+ * listIndexes).
+ */
+class GetMoreCmd : public Command {
+ MONGO_DISALLOW_COPYING(GetMoreCmd);
- bool isWriteCommandForConfigServer() const override { return false; }
+public:
+ GetMoreCmd() : Command("getMore") {}
- bool slaveOk() const override { return false; }
+ bool isWriteCommandForConfigServer() const override {
+ return false;
+ }
- bool slaveOverrideOk() const override { return true; }
+ bool slaveOk() const override {
+ return false;
+ }
- bool maintenanceOk() const override { return false; }
+ bool slaveOverrideOk() const override {
+ return true;
+ }
- bool adminOnly() const override { return false; }
+ bool maintenanceOk() const override {
+ return false;
+ }
- void help(std::stringstream& help) const override {
- help << "retrieve more results from an existing cursor";
- }
+ bool adminOnly() const override {
+ return false;
+ }
- /**
- * A getMore command increments the getMore counter, not the command counter.
- */
- bool shouldAffectCommandCounter() const override { return false; }
+ void help(std::stringstream& help) const override {
+ help << "retrieve more results from an existing cursor";
+ }
- std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const override {
- return GetMoreRequest::parseNs(dbname, cmdObj);
+ /**
+ * A getMore command increments the getMore counter, not the command counter.
+ */
+ bool shouldAffectCommandCounter() const override {
+ return false;
+ }
+
+ std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const override {
+ return GetMoreRequest::parseNs(dbname, cmdObj);
+ }
+
+ Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) override {
+ StatusWith<GetMoreRequest> parseStatus = GetMoreRequest::parseFromBSON(dbname, cmdObj);
+ if (!parseStatus.isOK()) {
+ return parseStatus.getStatus();
+ }
+ const GetMoreRequest& request = parseStatus.getValue();
+
+ return AuthorizationSession::get(client)
+ ->checkAuthForGetMore(request.nss, request.cursorid);
+ }
+
+ bool run(OperationContext* txn,
+ const std::string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result) override {
+ // Counted as a getMore, not as a command.
+ globalOpCounters.gotGetMore();
+
+ if (txn->getClient()->isInDirectClient()) {
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::IllegalOperation, "Cannot run getMore command from eval()"));
}
- Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) override {
- StatusWith<GetMoreRequest> parseStatus = GetMoreRequest::parseFromBSON(dbname, cmdObj);
- if (!parseStatus.isOK()) {
- return parseStatus.getStatus();
+ StatusWith<GetMoreRequest> parseStatus = GetMoreRequest::parseFromBSON(dbname, cmdObj);
+ if (!parseStatus.isOK()) {
+ return appendCommandStatus(result, parseStatus.getStatus());
+ }
+ const GetMoreRequest& request = parseStatus.getValue();
+
+ // Depending on the type of cursor being operated on, we hold locks for the whole
+ // getMore, or none of the getMore, or part of the getMore. The three cases in detail:
+ //
+ // 1) Normal cursor: we lock with "ctx" and hold it for the whole getMore.
+ // 2) Cursor owned by global cursor manager: we don't lock anything. These cursors
+ // don't own any collection state.
+ // 3) Agg cursor: we lock with "ctx", then release, then relock with "unpinDBLock" and
+ // "unpinCollLock". This is because agg cursors handle locking internally (hence the
+ // release), but the pin and unpin of the cursor must occur under the collection
+ // lock. We don't use our AutoGetCollectionForRead "ctx" to relock, because
+ // AutoGetCollectionForRead checks the sharding version (and we want the relock for
+ // the unpin to succeed even if the sharding version has changed).
+ //
+ // Note that we declare our locks before our ClientCursorPin, in order to ensure that
+ // the pin's destructor is called before the lock destructors (so that the unpin occurs
+ // under the lock).
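+ // For example: a plain find cursor is case (1); cursors created by
+ // listCollections or listIndexes live in the global cursor manager,
+ // case (2); and an aggregate cursor is case (3), so "ctx" is released
+ // below before execution and relocked only to unpin.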
+ std::unique_ptr<AutoGetCollectionForRead> ctx;
+ std::unique_ptr<Lock::DBLock> unpinDBLock;
+ std::unique_ptr<Lock::CollectionLock> unpinCollLock;
+
+ CursorManager* cursorManager;
+ CursorManager* globalCursorManager = CursorManager::getGlobalCursorManager();
+ if (globalCursorManager->ownsCursorId(request.cursorid)) {
+ cursorManager = globalCursorManager;
+ } else {
+ ctx.reset(new AutoGetCollectionForRead(txn, request.nss));
+ Collection* collection = ctx->getCollection();
+ if (!collection) {
+ return appendCommandStatus(result,
+ Status(ErrorCodes::OperationFailed,
+ "collection dropped between getMore calls"));
}
- const GetMoreRequest& request = parseStatus.getValue();
+ cursorManager = collection->getCursorManager();
+ }
- return AuthorizationSession::get(client)->checkAuthForGetMore(request.nss,
- request.cursorid);
+ ClientCursorPin ccPin(cursorManager, request.cursorid);
+ ClientCursor* cursor = ccPin.c();
+ if (!cursor) {
+ // We didn't find the cursor.
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::CursorNotFound,
+ str::stream() << "Cursor not found, cursor id: " << request.cursorid));
}
- bool run(OperationContext* txn,
- const std::string& dbname,
- BSONObj& cmdObj,
- int options,
- std::string& errmsg,
- BSONObjBuilder& result) override {
- // Counted as a getMore, not as a command.
- globalOpCounters.gotGetMore();
+ if (request.nss.ns() != cursor->ns()) {
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::Unauthorized,
+ str::stream() << "Requested getMore on namespace '" << request.nss.ns()
+ << "', but cursor belongs to a different namespace"));
+ }
- if (txn->getClient()->isInDirectClient()) {
- return appendCommandStatus(result,
- Status(ErrorCodes::IllegalOperation,
- "Cannot run getMore command from eval()"));
- }
+ const bool hasOwnMaxTime = CurOp::get(txn)->isMaxTimeSet();
- StatusWith<GetMoreRequest> parseStatus = GetMoreRequest::parseFromBSON(dbname, cmdObj);
- if (!parseStatus.isOK()) {
- return appendCommandStatus(result, parseStatus.getStatus());
- }
- const GetMoreRequest& request = parseStatus.getValue();
-
- // Depending on the type of cursor being operated on, we hold locks for the whole
- // getMore, or none of the getMore, or part of the getMore. The three cases in detail:
- //
- // 1) Normal cursor: we lock with "ctx" and hold it for the whole getMore.
- // 2) Cursor owned by global cursor manager: we don't lock anything. These cursors
- // don't own any collection state.
- // 3) Agg cursor: we lock with "ctx", then release, then relock with "unpinDBLock" and
- // "unpinCollLock". This is because agg cursors handle locking internally (hence the
- // release), but the pin and unpin of the cursor must occur under the collection
- // lock. We don't use our AutoGetCollectionForRead "ctx" to relock, because
- // AutoGetCollectionForRead checks the sharding version (and we want the relock for
- // the unpin to succeed even if the sharding version has changed).
- //
- // Note that we declare our locks before our ClientCursorPin, in order to ensure that
- // the pin's destructor is called before the lock destructors (so that the unpin occurs
- // under the lock).
- std::unique_ptr<AutoGetCollectionForRead> ctx;
- std::unique_ptr<Lock::DBLock> unpinDBLock;
- std::unique_ptr<Lock::CollectionLock> unpinCollLock;
-
- CursorManager* cursorManager;
- CursorManager* globalCursorManager = CursorManager::getGlobalCursorManager();
- if (globalCursorManager->ownsCursorId(request.cursorid)) {
- cursorManager = globalCursorManager;
- }
- else {
- ctx.reset(new AutoGetCollectionForRead(txn, request.nss));
- Collection* collection = ctx->getCollection();
- if (!collection) {
- return appendCommandStatus(result,
- Status(ErrorCodes::OperationFailed,
- "collection dropped between getMore calls"));
- }
- cursorManager = collection->getCursorManager();
- }
+ // Validation related to awaitData.
+ if (isCursorAwaitData(cursor)) {
+ invariant(isCursorTailable(cursor));
- ClientCursorPin ccPin(cursorManager, request.cursorid);
- ClientCursor* cursor = ccPin.c();
- if (!cursor) {
- // We didn't find the cursor.
- return appendCommandStatus(result, Status(ErrorCodes::CursorNotFound, str::stream()
- << "Cursor not found, cursor id: " << request.cursorid));
+ if (!hasOwnMaxTime) {
+ Status status(ErrorCodes::BadValue,
+ str::stream() << "Must set maxTimeMS on a getMore if the initial "
+ << "query had 'awaitData' set: " << cmdObj);
+ return appendCommandStatus(result, status);
}
- if (request.nss.ns() != cursor->ns()) {
- return appendCommandStatus(result, Status(ErrorCodes::Unauthorized, str::stream()
- << "Requested getMore on namespace '" << request.nss.ns()
- << "', but cursor belongs to a different namespace"));
+ if (cursor->isAggCursor()) {
+ Status status(ErrorCodes::BadValue,
+ "awaitData cannot be set on an aggregation cursor");
+ return appendCommandStatus(result, status);
}
+ }
- const bool hasOwnMaxTime = CurOp::get(txn)->isMaxTimeSet();
-
- // Validation related to awaitData.
- if (isCursorAwaitData(cursor)) {
- invariant(isCursorTailable(cursor));
+ // On early return, get rid of the cursor.
+ ScopeGuard cursorFreer = MakeGuard(&GetMoreCmd::cleanupCursor, txn, &ccPin, request);
- if (!hasOwnMaxTime) {
- Status status(ErrorCodes::BadValue,
- str::stream() << "Must set maxTimeMS on a getMore if the initial "
- << "query had 'awaitData' set: " << cmdObj);
- return appendCommandStatus(result, status);
- }
+ if (!cursor->hasRecoveryUnit()) {
+ // Start using a new RecoveryUnit.
+ cursor->setOwnedRecoveryUnit(
+ getGlobalServiceContext()->getGlobalStorageEngine()->newRecoveryUnit());
+ }
- if (cursor->isAggCursor()) {
- Status status(ErrorCodes::BadValue,
- "awaitData cannot be set on an aggregation cursor");
- return appendCommandStatus(result, status);
- }
- }
+ // Swap RecoveryUnit(s) between the ClientCursor and OperationContext.
+ ScopedRecoveryUnitSwapper ruSwapper(cursor, txn);
- // On early return, get rid of the cursor.
- ScopeGuard cursorFreer = MakeGuard(&GetMoreCmd::cleanupCursor, txn, &ccPin, request);
+ // Reset timeout timer on the cursor since the cursor is still in use.
+ cursor->setIdleTime(0);
- if (!cursor->hasRecoveryUnit()) {
- // Start using a new RecoveryUnit.
- cursor->setOwnedRecoveryUnit(
- getGlobalServiceContext()->getGlobalStorageEngine()->newRecoveryUnit());
- }
+ // If there is no time limit set directly on this getMore command, but the operation
+ // that spawned this cursor had a time limit set, then we have to apply any leftover
+ // time to this getMore.
+ if (!hasOwnMaxTime) {
+ CurOp::get(txn)->setMaxTimeMicros(cursor->getLeftoverMaxTimeMicros());
+ }
+ txn->checkForInterrupt(); // May trigger maxTimeAlwaysTimeOut fail point.
- // Swap RecoveryUnit(s) between the ClientCursor and OperationContext.
- ScopedRecoveryUnitSwapper ruSwapper(cursor, txn);
+ if (cursor->isAggCursor()) {
+ // Agg cursors handle their own locking internally.
+ ctx.reset(); // unlocks
+ }
- // Reset timeout timer on the cursor since the cursor is still in use.
- cursor->setIdleTime(0);
+ PlanExecutor* exec = cursor->getExecutor();
+ exec->restoreState(txn);
- // If there is no time limit set directly on this getMore command, but the operation
- // that spawned this cursor had a time limit set, then we have to apply any leftover
- // time to this getMore.
- if (!hasOwnMaxTime) {
- CurOp::get(txn)->setMaxTimeMicros(cursor->getLeftoverMaxTimeMicros());
- }
- txn->checkForInterrupt(); // May trigger maxTimeAlwaysTimeOut fail point.
+ // If we're tailing a capped collection, retrieve a monotonically increasing insert
+ // counter.
+ uint64_t lastInsertCount = 0;
+ if (isCursorAwaitData(cursor)) {
+ invariant(ctx->getCollection()->isCapped());
+ lastInsertCount = ctx->getCollection()->getCappedInsertNotifier()->getCount();
+ }
- if (cursor->isAggCursor()) {
- // Agg cursors handle their own locking internally.
- ctx.reset(); // unlocks
- }
+ CursorId respondWithId = 0;
+ BSONArrayBuilder nextBatch;
+ BSONObj obj;
+ PlanExecutor::ExecState state;
+ int numResults = 0;
+ Status batchStatus = generateBatch(cursor, request, &nextBatch, &state, &numResults);
+ if (!batchStatus.isOK()) {
+ return appendCommandStatus(result, batchStatus);
+ }
- PlanExecutor* exec = cursor->getExecutor();
+ // If this is an await data cursor, and we hit EOF without generating any results, then
+ // we block waiting for new oplog data to arrive.
+ if (isCursorAwaitData(cursor) && state == PlanExecutor::IS_EOF && numResults == 0) {
+ // Retrieve the notifier which we will wait on until new data arrives. We make sure
+ // to do this in the lock because once we drop the lock it is possible for the
+ // collection to become invalid. The notifier itself will outlive the collection if
+ // the collection is dropped, as we keep a shared_ptr to it.
+ auto notifier = ctx->getCollection()->getCappedInsertNotifier();
+
+ // Save the PlanExecutor and drop our locks.
+ exec->saveState();
+ ctx.reset();
+
+ // Block waiting for data.
+ Microseconds timeout(CurOp::get(txn)->getRemainingMaxTimeMicros());
+ notifier->waitForInsert(lastInsertCount, timeout);
+ notifier.reset();
+
+ ctx.reset(new AutoGetCollectionForRead(txn, request.nss));
exec->restoreState(txn);
- // If we're tailing a capped collection, retrieve a monotonically increasing insert
- // counter.
- uint64_t lastInsertCount = 0;
- if (isCursorAwaitData(cursor)) {
- invariant(ctx->getCollection()->isCapped());
- lastInsertCount = ctx->getCollection()->getCappedInsertNotifier()->getCount();
- }
-
- CursorId respondWithId = 0;
- BSONArrayBuilder nextBatch;
- BSONObj obj;
- PlanExecutor::ExecState state;
- int numResults = 0;
- Status batchStatus = generateBatch(cursor, request, &nextBatch, &state, &numResults);
+ // We woke up because either the timed_wait expired, or there was more data. Either
+ // way, attempt to generate another batch of results.
+ batchStatus = generateBatch(cursor, request, &nextBatch, &state, &numResults);
if (!batchStatus.isOK()) {
return appendCommandStatus(result, batchStatus);
}
+ }
- // If this is an await data cursor, and we hit EOF without generating any results, then
- // we block waiting for new oplog data to arrive.
- if (isCursorAwaitData(cursor) && state == PlanExecutor::IS_EOF && numResults == 0) {
- // Retrieve the notifier which we will wait on until new data arrives. We make sure
- // to do this in the lock because once we drop the lock it is possible for the
- // collection to become invalid. The notifier itself will outlive the collection if
- // the collection is dropped, as we keep a shared_ptr to it.
- auto notifier = ctx->getCollection()->getCappedInsertNotifier();
-
- // Save the PlanExecutor and drop our locks.
- exec->saveState();
- ctx.reset();
-
- // Block waiting for data.
- Microseconds timeout(CurOp::get(txn)->getRemainingMaxTimeMicros());
- notifier->waitForInsert(lastInsertCount, timeout);
- notifier.reset();
-
- ctx.reset(new AutoGetCollectionForRead(txn, request.nss));
- exec->restoreState(txn);
-
- // We woke up because either the timed_wait expired, or there was more data. Either
- // way, attempt to generate another batch of results.
- batchStatus = generateBatch(cursor, request, &nextBatch, &state, &numResults);
- if (!batchStatus.isOK()) {
- return appendCommandStatus(result, batchStatus);
- }
- }
-
- if (shouldSaveCursorGetMore(state, exec, isCursorTailable(cursor))) {
- respondWithId = request.cursorid;
+ if (shouldSaveCursorGetMore(state, exec, isCursorTailable(cursor))) {
+ respondWithId = request.cursorid;
- exec->saveState();
+ exec->saveState();
- // If maxTimeMS was set directly on the getMore rather than being rolled over
- // from a previous find, then don't roll remaining micros over to the next
- // getMore.
- if (!hasOwnMaxTime) {
- cursor->setLeftoverMaxTimeMicros(CurOp::get(txn)->getRemainingMaxTimeMicros());
- }
+ // If maxTimeMS was set directly on the getMore rather than being rolled over
+ // from a previous find, then don't roll remaining micros over to the next
+ // getMore.
+ if (!hasOwnMaxTime) {
+ cursor->setLeftoverMaxTimeMicros(CurOp::get(txn)->getRemainingMaxTimeMicros());
+ }
- cursor->incPos(numResults);
+ cursor->incPos(numResults);
- if (isCursorTailable(cursor) && state == PlanExecutor::IS_EOF) {
- // Rather than swapping their existing RU into the client cursor, tailable
- // cursors should get a new recovery unit.
- ruSwapper.dismiss();
- }
- }
- else {
- CurOp::get(txn)->debug().cursorExhausted = true;
+ if (isCursorTailable(cursor) && state == PlanExecutor::IS_EOF) {
+ // Rather than swapping their existing RU into the client cursor, tailable
+ // cursors should get a new recovery unit.
+ ruSwapper.dismiss();
}
+ } else {
+ CurOp::get(txn)->debug().cursorExhausted = true;
+ }
- appendGetMoreResponseObject(respondWithId, request.nss.ns(), nextBatch.arr(), &result);
+ appendGetMoreResponseObject(respondWithId, request.nss.ns(), nextBatch.arr(), &result);
- if (respondWithId) {
- cursorFreer.Dismiss();
+ if (respondWithId) {
+ cursorFreer.Dismiss();
- // If we are operating on an aggregation cursor, then we dropped our collection lock
- // earlier and need to reacquire it in order to clean up our ClientCursorPin.
- if (cursor->isAggCursor()) {
- invariant(NULL == ctx.get());
- unpinDBLock.reset(
- new Lock::DBLock(txn->lockState(), request.nss.db(), MODE_IS));
- unpinCollLock.reset(
- new Lock::CollectionLock(txn->lockState(), request.nss.ns(), MODE_IS));
- }
+ // If we are operating on an aggregation cursor, then we dropped our collection lock
+ // earlier and need to reacquire it in order to clean up our ClientCursorPin.
+ if (cursor->isAggCursor()) {
+ invariant(NULL == ctx.get());
+ unpinDBLock.reset(new Lock::DBLock(txn->lockState(), request.nss.db(), MODE_IS));
+ unpinCollLock.reset(
+ new Lock::CollectionLock(txn->lockState(), request.nss.ns(), MODE_IS));
}
-
- return true;
}
- /**
- * Uses 'cursor' and 'request' to fill out 'nextBatch' with the batch of result documents to
- * be returned by this getMore.
- *
- * Returns the number of documents in the batch in *numResults, which must be initialized to
- * zero by the caller. Returns the final ExecState returned by the cursor in *state.
- *
- * Returns an OK status if the batch was successfully generated, and a non-OK status if the
- * PlanExecutor encounters a failure.
- */
- Status generateBatch(ClientCursor* cursor,
- const GetMoreRequest& request,
- BSONArrayBuilder* nextBatch,
- PlanExecutor::ExecState* state,
- int* numResults) {
- PlanExecutor* exec = cursor->getExecutor();
- const bool isAwaitData = isCursorAwaitData(cursor);
-
- // If an awaitData getMore is killed during this process due to our max time expiring at
- // an interrupt point, we just continue as normal and return rather than reporting a
- // timeout to the user.
- BSONObj obj;
- try {
- while (PlanExecutor::ADVANCED == (*state = exec->getNext(&obj, NULL))) {
- // If adding this object will cause us to exceed the BSON size limit, then we
- // stash it for later.
- if (nextBatch->len() + obj.objsize() > BSONObjMaxUserSize && *numResults > 0) {
- exec->enqueue(obj);
- break;
- }
-
- // Add result to output buffer.
- nextBatch->append(obj);
- (*numResults)++;
-
- if (enoughForGetMore(request.batchSize.value_or(0),
- *numResults, nextBatch->len())) {
- break;
- }
- }
- }
- catch (const UserException& except) {
- if (isAwaitData && except.getCode() == ErrorCodes::ExceededTimeLimit) {
- // We ignore exceptions from interrupt points due to max time expiry for
- // awaitData cursors.
- }
- else {
- throw;
+ return true;
+ }
+
+ /**
+ * Uses 'cursor' and 'request' to fill out 'nextBatch' with the batch of result documents to
+ * be returned by this getMore.
+ *
+ * Returns the number of documents in the batch in *numResults, which must be initialized to
+ * zero by the caller. Returns the final ExecState returned by the cursor in *state.
+ *
+ * Returns an OK status if the batch was successfully generated, and a non-OK status if the
+ * PlanExecutor encounters a failure.
+ */
+ Status generateBatch(ClientCursor* cursor,
+ const GetMoreRequest& request,
+ BSONArrayBuilder* nextBatch,
+ PlanExecutor::ExecState* state,
+ int* numResults) {
+ PlanExecutor* exec = cursor->getExecutor();
+ const bool isAwaitData = isCursorAwaitData(cursor);
+
+ // If an awaitData getMore is killed during this process due to our max time expiring at
+ // an interrupt point, we just continue as normal and return rather than reporting a
+ // timeout to the user.
+ BSONObj obj;
+ try {
+ while (PlanExecutor::ADVANCED == (*state = exec->getNext(&obj, NULL))) {
+ // If adding this object will cause us to exceed the BSON size limit, then we
+ // stash it for later.
+ if (nextBatch->len() + obj.objsize() > BSONObjMaxUserSize && *numResults > 0) {
+ exec->enqueue(obj);
+ break;
}
- }
- if (PlanExecutor::FAILURE == *state || PlanExecutor::DEAD == *state) {
- const std::unique_ptr<PlanStageStats> stats(exec->getStats());
- error() << "GetMore command executor error: " << PlanExecutor::statestr(*state)
- << ", stats: " << Explain::statsToBSON(*stats);
+ // Add result to output buffer.
+ nextBatch->append(obj);
+ (*numResults)++;
- return Status(ErrorCodes::OperationFailed,
- str::stream() << "GetMore command executor error: "
- << WorkingSetCommon::toStatusString(obj));
+ if (enoughForGetMore(
+ request.batchSize.value_or(0), *numResults, nextBatch->len())) {
+ break;
+ }
+ }
+ } catch (const UserException& except) {
+ if (isAwaitData && except.getCode() == ErrorCodes::ExceededTimeLimit) {
+ // We ignore exceptions from interrupt points due to max time expiry for
+ // awaitData cursors.
+ } else {
+ throw;
}
-
- return Status::OK();
}
- /**
- * Called via a ScopeGuard on early return in order to ensure that the ClientCursor gets
- * cleaned up properly.
- */
- static void cleanupCursor(OperationContext* txn,
- ClientCursorPin* ccPin,
- const GetMoreRequest& request) {
- ClientCursor* cursor = ccPin->c();
+ if (PlanExecutor::FAILURE == *state || PlanExecutor::DEAD == *state) {
+ const std::unique_ptr<PlanStageStats> stats(exec->getStats());
+ error() << "GetMore command executor error: " << PlanExecutor::statestr(*state)
+ << ", stats: " << Explain::statsToBSON(*stats);
- std::unique_ptr<Lock::DBLock> unpinDBLock;
- std::unique_ptr<Lock::CollectionLock> unpinCollLock;
+ return Status(ErrorCodes::OperationFailed,
+ str::stream() << "GetMore command executor error: "
+ << WorkingSetCommon::toStatusString(obj));
+ }
- if (cursor->isAggCursor()) {
- unpinDBLock.reset(new Lock::DBLock(txn->lockState(), request.nss.db(), MODE_IS));
- unpinCollLock.reset(
- new Lock::CollectionLock(txn->lockState(), request.nss.ns(), MODE_IS));
- }
+ return Status::OK();
+ }
- ccPin->deleteUnderlying();
+ /**
+ * Called via a ScopeGuard on early return in order to ensure that the ClientCursor gets
+ * cleaned up properly.
+ */
+ static void cleanupCursor(OperationContext* txn,
+ ClientCursorPin* ccPin,
+ const GetMoreRequest& request) {
+ ClientCursor* cursor = ccPin->c();
+
+ std::unique_ptr<Lock::DBLock> unpinDBLock;
+ std::unique_ptr<Lock::CollectionLock> unpinCollLock;
+
+ if (cursor->isAggCursor()) {
+ unpinDBLock.reset(new Lock::DBLock(txn->lockState(), request.nss.db(), MODE_IS));
+ unpinCollLock.reset(
+ new Lock::CollectionLock(txn->lockState(), request.nss.ns(), MODE_IS));
}
- } getMoreCmd;
+ ccPin->deleteUnderlying();
+ }
+
+} getMoreCmd;
-} // namespace mongo
+} // namespace mongo
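For reference, a minimal sketch of a request this command parses (the field names follow GetMoreRequest as used above; the cursor id, collection name, and values are illustrative):

    // The cursor id comes from the reply of the cursor-creating command
    // (find, aggregate, listIndexes, ...).
    const long long cursorId = 12345;  // placeholder
    BSONObj getMoreCmd = BSON("getMore" << cursorId << "collection"
                                        << "events"               // resolved against dbname by parseNs()
                                        << "batchSize" << 100     // optional
                                        << "maxTimeMS" << 5000);  // required if the original query set awaitData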
diff --git a/src/mongo/db/commands/group.cpp b/src/mongo/db/commands/group.cpp
index 31a03fa2543..cdfb9ca0d5a 100644
--- a/src/mongo/db/commands/group.cpp
+++ b/src/mongo/db/commands/group.cpp
@@ -44,176 +44,164 @@
namespace mongo {
- using std::unique_ptr;
- using std::string;
+using std::unique_ptr;
+using std::string;
- static GroupCommand cmdGroup;
+static GroupCommand cmdGroup;
- GroupCommand::GroupCommand() : Command("group") {}
+GroupCommand::GroupCommand() : Command("group") {}
- Status GroupCommand::checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- std::string ns = parseNs(dbname, cmdObj);
- if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnNamespace(
- NamespaceString(ns), ActionType::find)) {
- return Status(ErrorCodes::Unauthorized, "unauthorized");
- }
- return Status::OK();
+Status GroupCommand::checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ std::string ns = parseNs(dbname, cmdObj);
+ if (!AuthorizationSession::get(client)
+ ->isAuthorizedForActionsOnNamespace(NamespaceString(ns), ActionType::find)) {
+ return Status(ErrorCodes::Unauthorized, "unauthorized");
}
-
- std::string GroupCommand::parseNs(const std::string& dbname, const BSONObj& cmdObj) const {
- const BSONObj& p = cmdObj.firstElement().embeddedObjectUserCheck();
- uassert(17211, "ns has to be set", p["ns"].type() == String);
- return dbname + "." + p["ns"].String();
+ return Status::OK();
+}
+
+std::string GroupCommand::parseNs(const std::string& dbname, const BSONObj& cmdObj) const {
+ const BSONObj& p = cmdObj.firstElement().embeddedObjectUserCheck();
+ uassert(17211, "ns has to be set", p["ns"].type() == String);
+ return dbname + "." + p["ns"].String();
+}
+
+Status GroupCommand::parseRequest(const string& dbname,
+ const BSONObj& cmdObj,
+ GroupRequest* request) const {
+ request->ns = parseNs(dbname, cmdObj);
+
+ // By default, group requests are regular group commands, not explains of group.
+ request->explain = false;
+
+ const BSONObj& p = cmdObj.firstElement().embeddedObjectUserCheck();
+
+ if (p["cond"].type() == Object) {
+ request->query = p["cond"].embeddedObject().getOwned();
+ } else if (p["condition"].type() == Object) {
+ request->query = p["condition"].embeddedObject().getOwned();
+ } else if (p["query"].type() == Object) {
+ request->query = p["query"].embeddedObject().getOwned();
+ } else if (p["q"].type() == Object) {
+ request->query = p["q"].embeddedObject().getOwned();
}
- Status GroupCommand::parseRequest(const string& dbname,
- const BSONObj& cmdObj,
- GroupRequest* request) const {
- request->ns = parseNs(dbname, cmdObj);
-
- // By default, group requests are regular group commands, not explains of group.
- request->explain = false;
-
- const BSONObj& p = cmdObj.firstElement().embeddedObjectUserCheck();
-
- if (p["cond"].type() == Object) {
- request->query = p["cond"].embeddedObject().getOwned();
- }
- else if (p["condition"].type() == Object) {
- request->query = p["condition"].embeddedObject().getOwned();
- }
- else if (p["query"].type() == Object) {
- request->query = p["query"].embeddedObject().getOwned();
- }
- else if (p["q"].type() == Object) {
- request->query = p["q"].embeddedObject().getOwned();
- }
-
- if (p["key"].type() == Object) {
- request->keyPattern = p["key"].embeddedObjectUserCheck().getOwned();
- if (!p["$keyf"].eoo()) {
- return Status(ErrorCodes::BadValue, "can't have key and $keyf");
- }
- }
- else if (!p["$keyf"].eoo()) {
- request->keyFunctionCode = p["$keyf"]._asCode();
- }
- else {
- // No key specified. Use the entire object as the key.
- }
-
- BSONElement reduce = p["$reduce"];
- if (reduce.eoo()) {
- return Status(ErrorCodes::BadValue, "$reduce has to be set");
- }
- request->reduceCode = reduce._asCode();
-
- if (reduce.type() == CodeWScope) {
- request->reduceScope = reduce.codeWScopeObject().getOwned();
+ if (p["key"].type() == Object) {
+ request->keyPattern = p["key"].embeddedObjectUserCheck().getOwned();
+ if (!p["$keyf"].eoo()) {
+ return Status(ErrorCodes::BadValue, "can't have key and $keyf");
}
+ } else if (!p["$keyf"].eoo()) {
+ request->keyFunctionCode = p["$keyf"]._asCode();
+ } else {
+ // No key specified. Use the entire object as the key.
+ }
- if (p["initial"].type() != Object) {
- return Status(ErrorCodes::BadValue, "initial has to be an object");
- }
- request->initial = p["initial"].embeddedObject().getOwned();
+ BSONElement reduce = p["$reduce"];
+ if (reduce.eoo()) {
+ return Status(ErrorCodes::BadValue, "$reduce has to be set");
+ }
+ request->reduceCode = reduce._asCode();
- if (!p["finalize"].eoo()) {
- request->finalize = p["finalize"]._asCode();
- }
+ if (reduce.type() == CodeWScope) {
+ request->reduceScope = reduce.codeWScopeObject().getOwned();
+ }
- return Status::OK();
+ if (p["initial"].type() != Object) {
+ return Status(ErrorCodes::BadValue, "initial has to be an object");
}
+ request->initial = p["initial"].embeddedObject().getOwned();
- bool GroupCommand::run(OperationContext* txn,
- const std::string& dbname,
- BSONObj& cmdObj,
- int,
- std::string& errmsg,
- BSONObjBuilder& out) {
- GroupRequest groupRequest;
- Status parseRequestStatus = parseRequest(dbname, cmdObj, &groupRequest);
- if (!parseRequestStatus.isOK()) {
- return appendCommandStatus(out, parseRequestStatus);
- }
+ if (!p["finalize"].eoo()) {
+ request->finalize = p["finalize"]._asCode();
+ }
- AutoGetCollectionForRead ctx(txn, groupRequest.ns);
- Collection* coll = ctx.getCollection();
-
- PlanExecutor *rawPlanExecutor;
- Status getExecStatus = getExecutorGroup(txn,
- coll,
- groupRequest,
- PlanExecutor::YIELD_AUTO,
- &rawPlanExecutor);
- if (!getExecStatus.isOK()) {
- return appendCommandStatus(out, getExecStatus);
- }
+ return Status::OK();
+}
+
+bool GroupCommand::run(OperationContext* txn,
+ const std::string& dbname,
+ BSONObj& cmdObj,
+ int,
+ std::string& errmsg,
+ BSONObjBuilder& out) {
+ GroupRequest groupRequest;
+ Status parseRequestStatus = parseRequest(dbname, cmdObj, &groupRequest);
+ if (!parseRequestStatus.isOK()) {
+ return appendCommandStatus(out, parseRequestStatus);
+ }
- unique_ptr<PlanExecutor> planExecutor(rawPlanExecutor);
-
- // Group executors return ADVANCED exactly once, with the entire group result.
- BSONObj retval;
- PlanExecutor::ExecState state = planExecutor->getNext(&retval, NULL);
- if (PlanExecutor::ADVANCED != state) {
- invariant(PlanExecutor::FAILURE == state || PlanExecutor::DEAD == state);
-
- if (WorkingSetCommon::isValidStatusMemberObject(retval)) {
- return appendCommandStatus(out, WorkingSetCommon::getMemberObjectStatus(retval));
- }
- return appendCommandStatus(out,
- Status(ErrorCodes::BadValue,
- str::stream() << "error encountered during group "
- << "operation, executor returned "
- << PlanExecutor::statestr(state)));
- }
+ AutoGetCollectionForRead ctx(txn, groupRequest.ns);
+ Collection* coll = ctx.getCollection();
- invariant(planExecutor->isEOF());
+ PlanExecutor* rawPlanExecutor;
+ Status getExecStatus =
+ getExecutorGroup(txn, coll, groupRequest, PlanExecutor::YIELD_AUTO, &rawPlanExecutor);
+ if (!getExecStatus.isOK()) {
+ return appendCommandStatus(out, getExecStatus);
+ }
- invariant(STAGE_GROUP == planExecutor->getRootStage()->stageType());
- GroupStage* groupStage = static_cast<GroupStage*>(planExecutor->getRootStage());
- const GroupStats* groupStats =
- static_cast<const GroupStats*>(groupStage->getSpecificStats());
- const CommonStats* groupChildStats = groupStage->getChildren()[0]->getCommonStats();
+ unique_ptr<PlanExecutor> planExecutor(rawPlanExecutor);
- out.appendArray("retval", retval);
- out.append("count", static_cast<long long>(groupChildStats->advanced));
- out.append("keys", static_cast<long long>(groupStats->nGroups));
+ // Group executors return ADVANCED exactly once, with the entire group result.
+ BSONObj retval;
+ PlanExecutor::ExecState state = planExecutor->getNext(&retval, NULL);
+ if (PlanExecutor::ADVANCED != state) {
+ invariant(PlanExecutor::FAILURE == state || PlanExecutor::DEAD == state);
- return true;
+ if (WorkingSetCommon::isValidStatusMemberObject(retval)) {
+ return appendCommandStatus(out, WorkingSetCommon::getMemberObjectStatus(retval));
+ }
+ return appendCommandStatus(out,
+ Status(ErrorCodes::BadValue,
+ str::stream() << "error encountered during group "
+ << "operation, executor returned "
+ << PlanExecutor::statestr(state)));
}
- Status GroupCommand::explain(OperationContext* txn,
- const std::string& dbname,
- const BSONObj& cmdObj,
- ExplainCommon::Verbosity verbosity,
- BSONObjBuilder* out) const {
- GroupRequest groupRequest;
- Status parseRequestStatus = parseRequest(dbname, cmdObj, &groupRequest);
- if (!parseRequestStatus.isOK()) {
- return parseRequestStatus;
- }
+ invariant(planExecutor->isEOF());
+
+ invariant(STAGE_GROUP == planExecutor->getRootStage()->stageType());
+ GroupStage* groupStage = static_cast<GroupStage*>(planExecutor->getRootStage());
+ const GroupStats* groupStats = static_cast<const GroupStats*>(groupStage->getSpecificStats());
+ const CommonStats* groupChildStats = groupStage->getChildren()[0]->getCommonStats();
+
+ out.appendArray("retval", retval);
+ out.append("count", static_cast<long long>(groupChildStats->advanced));
+ out.append("keys", static_cast<long long>(groupStats->nGroups));
+
+ return true;
+}
+
+Status GroupCommand::explain(OperationContext* txn,
+ const std::string& dbname,
+ const BSONObj& cmdObj,
+ ExplainCommon::Verbosity verbosity,
+ BSONObjBuilder* out) const {
+ GroupRequest groupRequest;
+ Status parseRequestStatus = parseRequest(dbname, cmdObj, &groupRequest);
+ if (!parseRequestStatus.isOK()) {
+ return parseRequestStatus;
+ }
- groupRequest.explain = true;
+ groupRequest.explain = true;
- AutoGetCollectionForRead ctx(txn, groupRequest.ns);
- Collection* coll = ctx.getCollection();
+ AutoGetCollectionForRead ctx(txn, groupRequest.ns);
+ Collection* coll = ctx.getCollection();
- PlanExecutor *rawPlanExecutor;
- Status getExecStatus = getExecutorGroup(txn,
- coll,
- groupRequest,
- PlanExecutor::YIELD_AUTO,
- &rawPlanExecutor);
- if (!getExecStatus.isOK()) {
- return getExecStatus;
- }
+ PlanExecutor* rawPlanExecutor;
+ Status getExecStatus =
+ getExecutorGroup(txn, coll, groupRequest, PlanExecutor::YIELD_AUTO, &rawPlanExecutor);
+ if (!getExecStatus.isOK()) {
+ return getExecStatus;
+ }
- unique_ptr<PlanExecutor> planExecutor(rawPlanExecutor);
+ unique_ptr<PlanExecutor> planExecutor(rawPlanExecutor);
- Explain::explainStages(planExecutor.get(), verbosity, out);
- return Status::OK();
- }
+ Explain::explainStages(planExecutor.get(), verbosity, out);
+ return Status::OK();
+}
} // namespace mongo
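A minimal sketch of a command document that parseRequest() above accepts ("cond", "condition", "query", and "q" are interchangeable spellings per the parser; the collection name, filter, and reduce function are illustrative):

    BSONObj groupCmd = BSON(
        "group" << BSON("ns" << "events"                                      // required, string
                        << "key" << BSON("userId" << 1)                       // or "$keyf", not both
                        << "cond" << BSON("active" << true)                   // optional filter
                        << "$reduce" << "function(doc, acc) { acc.n += 1; }"  // required
                        << "initial" << BSON("n" << 0)));                     // required, object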
diff --git a/src/mongo/db/commands/group.h b/src/mongo/db/commands/group.h
index d6486a8d978..eab554dd1f8 100644
--- a/src/mongo/db/commands/group.h
+++ b/src/mongo/db/commands/group.h
@@ -32,60 +32,68 @@
namespace mongo {
- class ClientBasic;
- class Database;
- class OperationContext;
- class PlanExecutor;
- class Scope;
-
- struct GroupRequest;
-
- class GroupCommand : public Command {
- public:
- GroupCommand();
-
- private:
- virtual bool isWriteCommandForConfigServer() const { return false; }
-
- virtual bool maintenanceOk() const { return false; }
-
- virtual bool slaveOk() const { return false; }
-
- virtual bool slaveOverrideOk() const { return true; }
-
- virtual void help(std::stringstream& help) const {
- help << "http://dochub.mongodb.org/core/aggregation";
- }
-
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj);
-
- virtual std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const;
-
- virtual bool run(OperationContext* txn,
- const std::string& dbname,
- BSONObj& jsobj,
- int,
- std::string& errmsg,
- BSONObjBuilder& result);
-
- virtual Status explain(OperationContext* txn,
- const std::string& dbname,
- const BSONObj& cmdObj,
- ExplainCommon::Verbosity verbosity,
- BSONObjBuilder* out) const;
-
- /**
- * Parse a group command object.
- *
- * If 'cmdObj' is well-formed, returns Status::OK() and fills in out-argument 'request'.
- *
- * If a parsing error is encountered, returns an error Status.
- */
- Status parseRequest(const std::string& dbname,
- const BSONObj& cmdObj,
- GroupRequest* request) const;
- };
+class ClientBasic;
+class Database;
+class OperationContext;
+class PlanExecutor;
+class Scope;
+
+struct GroupRequest;
+
+class GroupCommand : public Command {
+public:
+ GroupCommand();
+
+private:
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+
+ virtual bool maintenanceOk() const {
+ return false;
+ }
+
+ virtual bool slaveOk() const {
+ return false;
+ }
+
+ virtual bool slaveOverrideOk() const {
+ return true;
+ }
+
+ virtual void help(std::stringstream& help) const {
+ help << "http://dochub.mongodb.org/core/aggregation";
+ }
+
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj);
+
+ virtual std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const;
+
+ virtual bool run(OperationContext* txn,
+ const std::string& dbname,
+ BSONObj& jsobj,
+ int,
+ std::string& errmsg,
+ BSONObjBuilder& result);
+
+ virtual Status explain(OperationContext* txn,
+ const std::string& dbname,
+ const BSONObj& cmdObj,
+ ExplainCommon::Verbosity verbosity,
+ BSONObjBuilder* out) const;
+
+ /**
+ * Parse a group command object.
+ *
+ * If 'cmdObj' is well-formed, returns Status::OK() and fills in out-argument 'request'.
+ *
+ * If a parsing error is encountered, returns an error Status.
+ */
+ Status parseRequest(const std::string& dbname,
+ const BSONObj& cmdObj,
+ GroupRequest* request) const;
+};
} // namespace mongo
diff --git a/src/mongo/db/commands/hashcmd.cpp b/src/mongo/db/commands/hashcmd.cpp
index 21fc475469d..a4f7c437630 100644
--- a/src/mongo/db/commands/hashcmd.cpp
+++ b/src/mongo/db/commands/hashcmd.cpp
@@ -46,60 +46,66 @@
namespace mongo {
- using std::string;
- using std::stringstream;
+using std::string;
+using std::stringstream;
- // Testing only, enabled via command-line.
- class CmdHashElt : public Command {
- public:
- CmdHashElt() : Command("_hashBSONElement") {};
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual bool slaveOk() const { return true; }
- // No auth needed because it only works when enabled via command line.
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {}
- virtual void help( stringstream& help ) const {
- help << "returns the hash of the first BSONElement val in a BSONObj";
- }
+// Testing only, enabled via command-line.
+class CmdHashElt : public Command {
+public:
+ CmdHashElt() : Command("_hashBSONElement"){};
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual bool slaveOk() const {
+ return true;
+ }
+ // No auth needed because it only works when enabled via command line.
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {}
+ virtual void help(stringstream& help) const {
+ help << "returns the hash of the first BSONElement val in a BSONObj";
+ }
- /* CmdObj has the form {"hash" : <thingToHash>}
- * or {"hash" : <thingToHash>, "seed" : <number> }
- * Result has the form
- * {"key" : <thingTohash>, "seed" : <int>, "out": NumberLong(<hash>)}
- *
- * Example use in the shell:
- *> db.runCommand({hash: "hashthis", seed: 1})
- *> {"key" : "hashthis",
- *> "seed" : 1,
- *> "out" : NumberLong(6271151123721111923),
- *> "ok" : 1 }
- **/
- bool run(OperationContext* txn, const string& db,
- BSONObj& cmdObj,
- int options, string& errmsg,
- BSONObjBuilder& result){
- result.appendAs(cmdObj.firstElement(),"key");
+ /* CmdObj has the form {"hash" : <thingToHash>}
+ * or {"hash" : <thingToHash>, "seed" : <number> }
+ * Result has the form
+ * {"key" : <thingTohash>, "seed" : <int>, "out": NumberLong(<hash>)}
+ *
+ * Example use in the shell:
+ *> db.runCommand({hash: "hashthis", seed: 1})
+ *> {"key" : "hashthis",
+ *> "seed" : 1,
+ *> "out" : NumberLong(6271151123721111923),
+ *> "ok" : 1 }
+ **/
+ bool run(OperationContext* txn,
+ const string& db,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ result.appendAs(cmdObj.firstElement(), "key");
- int seed = 0;
- if (cmdObj.hasField("seed")){
- if (! cmdObj["seed"].isNumber()) {
- errmsg += "seed must be a number";
- return false;
- }
- seed = cmdObj["seed"].numberInt();
+ int seed = 0;
+ if (cmdObj.hasField("seed")) {
+ if (!cmdObj["seed"].isNumber()) {
+ errmsg += "seed must be a number";
+ return false;
}
- result.append( "seed" , seed );
-
- result.append( "out" , BSONElementHasher::hash64( cmdObj.firstElement() , seed ) );
- return true;
+ seed = cmdObj["seed"].numberInt();
}
- };
- MONGO_INITIALIZER(RegisterHashEltCmd)(InitializerContext* context) {
- if (Command::testCommandsEnabled) {
- // Leaked intentionally: a Command registers itself when constructed.
- new CmdHashElt();
- }
- return Status::OK();
+ result.append("seed", seed);
+
+ result.append("out", BSONElementHasher::hash64(cmdObj.firstElement(), seed));
+ return true;
}
+};
+MONGO_INITIALIZER(RegisterHashEltCmd)(InitializerContext* context) {
+ if (Command::testCommandsEnabled) {
+ // Leaked intentionally: a Command registers itself when constructed.
+ new CmdHashElt();
+ }
+ return Status::OK();
+}
}
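The same hash is reachable in-process through BSONElementHasher (a sketch; the header path and the seed value mirror the shell example above):

    #include "mongo/db/hasher.h"

    BSONObj doc = BSON("hash" << "hashthis");
    long long out = BSONElementHasher::hash64(doc.firstElement(), 1 /* seed */);
    // matches the "out" value of db.runCommand({hash: "hashthis", seed: 1})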
diff --git a/src/mongo/db/commands/index_filter_commands.cpp b/src/mongo/db/commands/index_filter_commands.cpp
index ac85733d3b5..20783c5b244 100644
--- a/src/mongo/db/commands/index_filter_commands.cpp
+++ b/src/mongo/db/commands/index_filter_commands.cpp
@@ -47,355 +47,357 @@
namespace {
- using std::string;
- using std::vector;
- using namespace mongo;
-
- /**
- * Utility function to extract error code and message from status
- * and append to BSON results.
- */
- void addStatus(const Status& status, BSONObjBuilder& builder) {
- builder.append("ok", status.isOK() ? 1.0 : 0.0);
- if (!status.isOK()) {
- builder.append("code", status.code());
- }
- if (!status.reason().empty()) {
- builder.append("errmsg", status.reason());
- }
- }
+using std::string;
+using std::vector;
+using namespace mongo;
- /**
- * Retrieves a collection's query settings and plan cache from the database.
- */
- static Status getQuerySettingsAndPlanCache(OperationContext* txn,
- Collection* collection,
- const string& ns,
- QuerySettings** querySettingsOut,
- PlanCache** planCacheOut) {
-
- *querySettingsOut = NULL;
- *planCacheOut = NULL;
- if (NULL == collection) {
- return Status(ErrorCodes::BadValue, "no such collection");
- }
+/**
+ * Utility function to extract error code and message from status
+ * and append to BSON results.
+ */
+void addStatus(const Status& status, BSONObjBuilder& builder) {
+ builder.append("ok", status.isOK() ? 1.0 : 0.0);
+ if (!status.isOK()) {
+ builder.append("code", status.code());
+ }
+ if (!status.reason().empty()) {
+ builder.append("errmsg", status.reason());
+ }
+}
- CollectionInfoCache* infoCache = collection->infoCache();
- invariant(infoCache);
+/**
+ * Retrieves a collection's query settings and plan cache from the database.
+ */
+static Status getQuerySettingsAndPlanCache(OperationContext* txn,
+ Collection* collection,
+ const string& ns,
+ QuerySettings** querySettingsOut,
+ PlanCache** planCacheOut) {
+ *querySettingsOut = NULL;
+ *planCacheOut = NULL;
+ if (NULL == collection) {
+ return Status(ErrorCodes::BadValue, "no such collection");
+ }
- QuerySettings* querySettings = infoCache->getQuerySettings();
- invariant(querySettings);
+ CollectionInfoCache* infoCache = collection->infoCache();
+ invariant(infoCache);
- *querySettingsOut = querySettings;
+ QuerySettings* querySettings = infoCache->getQuerySettings();
+ invariant(querySettings);
- PlanCache* planCache = infoCache->getPlanCache();
- invariant(planCache);
+ *querySettingsOut = querySettings;
- *planCacheOut = planCache;
+ PlanCache* planCache = infoCache->getPlanCache();
+ invariant(planCache);
- return Status::OK();
- }
+ *planCacheOut = planCache;
- //
- // Command instances.
- // Registers commands with the command system and makes commands
- // available to the client.
- //
+ return Status::OK();
+}
- MONGO_INITIALIZER_WITH_PREREQUISITES(SetupIndexFilterCommands, MONGO_NO_PREREQUISITES)(
- InitializerContext* context) {
+//
+// Command instances.
+// Registers commands with the command system and makes commands
+// available to the client.
+//
- new ListFilters();
- new ClearFilters();
- new SetFilter();
+MONGO_INITIALIZER_WITH_PREREQUISITES(SetupIndexFilterCommands,
+ MONGO_NO_PREREQUISITES)(InitializerContext* context) {
+ new ListFilters();
+ new ClearFilters();
+ new SetFilter();
- return Status::OK();
- }
+ return Status::OK();
+}
-} // namespace
+} // namespace
namespace mongo {
- using std::string;
- using std::stringstream;
- using std::vector;
- using std::unique_ptr;
-
- IndexFilterCommand::IndexFilterCommand(const string& name, const string& helpText)
- : Command(name),
- helpText(helpText) { }
-
- bool IndexFilterCommand::run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
- string ns = parseNs(dbname, cmdObj);
+using std::string;
+using std::stringstream;
+using std::vector;
+using std::unique_ptr;
- Status status = runIndexFilterCommand(txn, ns, cmdObj, &result);
+IndexFilterCommand::IndexFilterCommand(const string& name, const string& helpText)
+ : Command(name), helpText(helpText) {}
- if (!status.isOK()) {
- addStatus(status, result);
- return false;
- }
+bool IndexFilterCommand::run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ string ns = parseNs(dbname, cmdObj);
- return true;
- }
+ Status status = runIndexFilterCommand(txn, ns, cmdObj, &result);
- bool IndexFilterCommand::isWriteCommandForConfigServer() const { return false; }
-
- bool IndexFilterCommand::slaveOk() const {
+ if (!status.isOK()) {
+ addStatus(status, result);
return false;
}
- bool IndexFilterCommand::slaveOverrideOk() const {
- return true;
- }
+ return true;
+}
- void IndexFilterCommand::help(stringstream& ss) const {
- ss << helpText;
- }
-
- Status IndexFilterCommand::checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- AuthorizationSession* authzSession = AuthorizationSession::get(client);
- ResourcePattern pattern = parseResourcePattern(dbname, cmdObj);
+bool IndexFilterCommand::isWriteCommandForConfigServer() const {
+ return false;
+}
- if (authzSession->isAuthorizedForActionsOnResource(pattern, ActionType::planCacheIndexFilter)) {
- return Status::OK();
- }
+bool IndexFilterCommand::slaveOk() const {
+ return false;
+}
- return Status(ErrorCodes::Unauthorized, "unauthorized");
- }
+bool IndexFilterCommand::slaveOverrideOk() const {
+ return true;
+}
- ListFilters::ListFilters() : IndexFilterCommand("planCacheListFilters",
- "Displays index filters for all query shapes in a collection.") { }
+void IndexFilterCommand::help(stringstream& ss) const {
+ ss << helpText;
+}
- Status ListFilters::runIndexFilterCommand(OperationContext* txn,
- const string& ns,
- BSONObj& cmdObj,
- BSONObjBuilder* bob) {
- // This is a read lock. The query settings is owned by the collection.
- AutoGetCollectionForRead ctx(txn, ns);
+Status IndexFilterCommand::checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ AuthorizationSession* authzSession = AuthorizationSession::get(client);
+ ResourcePattern pattern = parseResourcePattern(dbname, cmdObj);
- QuerySettings* querySettings;
- PlanCache* unused;
- Status status =
- getQuerySettingsAndPlanCache(txn, ctx.getCollection(), ns, &querySettings, &unused);
- if (!status.isOK()) {
- // No collection - return empty array of filters.
- BSONArrayBuilder hintsBuilder(bob->subarrayStart("filters"));
- hintsBuilder.doneFast();
- return Status::OK();
- }
- return list(*querySettings, bob);
+ if (authzSession->isAuthorizedForActionsOnResource(pattern, ActionType::planCacheIndexFilter)) {
+ return Status::OK();
}
- // static
- Status ListFilters::list(const QuerySettings& querySettings, BSONObjBuilder* bob) {
- invariant(bob);
-
- // Format of BSON result:
- //
- // {
- // hints: [
- // {
- // query: <query>,
- // sort: <sort>,
- // projection: <projection>,
- // indexes: [<index1>, <index2>, <index3>, ...]
- // }
- // }
+ return Status(ErrorCodes::Unauthorized, "unauthorized");
+}
+
+ListFilters::ListFilters()
+ : IndexFilterCommand("planCacheListFilters",
+ "Displays index filters for all query shapes in a collection.") {}
+
+Status ListFilters::runIndexFilterCommand(OperationContext* txn,
+ const string& ns,
+ BSONObj& cmdObj,
+ BSONObjBuilder* bob) {
+    // This is a read lock. The query settings object is owned by the collection.
+ AutoGetCollectionForRead ctx(txn, ns);
+
+ QuerySettings* querySettings;
+ PlanCache* unused;
+ Status status =
+ getQuerySettingsAndPlanCache(txn, ctx.getCollection(), ns, &querySettings, &unused);
+ if (!status.isOK()) {
+ // No collection - return empty array of filters.
BSONArrayBuilder hintsBuilder(bob->subarrayStart("filters"));
- OwnedPointerVector<AllowedIndexEntry> entries;
- entries.mutableVector() = querySettings.getAllAllowedIndices();
- for (vector<AllowedIndexEntry*>::const_iterator i = entries.begin();
- i != entries.end(); ++i) {
- AllowedIndexEntry* entry = *i;
- invariant(entry);
-
- BSONObjBuilder hintBob(hintsBuilder.subobjStart());
- hintBob.append("query", entry->query);
- hintBob.append("sort", entry->sort);
- hintBob.append("projection", entry->projection);
- BSONArrayBuilder indexesBuilder(hintBob.subarrayStart("indexes"));
- for (vector<BSONObj>::const_iterator j = entry->indexKeyPatterns.begin();
- j != entry->indexKeyPatterns.end(); ++j) {
- const BSONObj& index = *j;
- indexesBuilder.append(index);
- }
- indexesBuilder.doneFast();
- }
hintsBuilder.doneFast();
return Status::OK();
}
+ return list(*querySettings, bob);
+}
- ClearFilters::ClearFilters() : IndexFilterCommand("planCacheClearFilters",
- "Clears index filter for a single query shape or, "
- "if the query shape is omitted, all filters for the collection.") { }
-
- Status ClearFilters::runIndexFilterCommand(OperationContext* txn,
- const std::string& ns,
- BSONObj& cmdObj,
- BSONObjBuilder* bob) {
- // This is a read lock. The query settings is owned by the collection.
- AutoGetCollectionForRead ctx(txn, ns);
-
- QuerySettings* querySettings;
- PlanCache* planCache;
- Status status =
- getQuerySettingsAndPlanCache(txn, ctx.getCollection(), ns, &querySettings, &planCache);
- if (!status.isOK()) {
- // No collection - do nothing.
- return Status::OK();
- }
- return clear(txn, querySettings, planCache, ns, cmdObj);
- }
-
- // static
- Status ClearFilters::clear(OperationContext* txn,
- QuerySettings* querySettings,
- PlanCache* planCache,
- const std::string& ns,
- const BSONObj& cmdObj) {
- invariant(querySettings);
-
- // According to the specification, the planCacheClearFilters command runs in two modes:
- // - clear all hints; or
- // - clear hints for single query shape when a query shape is described in the
- // command arguments.
- if (cmdObj.hasField("query")) {
- CanonicalQuery* cqRaw;
- Status status = PlanCacheCommand::canonicalize(txn, ns, cmdObj, &cqRaw);
- if (!status.isOK()) {
- return status;
- }
-
- unique_ptr<CanonicalQuery> cq(cqRaw);
- querySettings->removeAllowedIndices(planCache->computeKey(*cq));
-
- // Remove entry from plan cache
- planCache->remove(*cq);
- return Status::OK();
- }
-
- // If query is not provided, make sure sort and projection are not in arguments.
- // We do not want to clear the entire cache inadvertently when the user
- // forgot to provide a value for "query".
- if (cmdObj.hasField("sort") || cmdObj.hasField("projection")) {
- return Status(ErrorCodes::BadValue, "sort or projection provided without query");
- }
+// static
+Status ListFilters::list(const QuerySettings& querySettings, BSONObjBuilder* bob) {
+ invariant(bob);
- // Get entries from query settings. We need to remove corresponding entries from the plan
- // cache shortly.
- OwnedPointerVector<AllowedIndexEntry> entries;
- entries.mutableVector() = querySettings->getAllAllowedIndices();
-
- // OK to proceed with clearing entire cache.
- querySettings->clearAllowedIndices();
-
- const NamespaceString nss(ns);
- const WhereCallbackReal whereCallback(txn, nss.db());
-
- // Remove corresponding entries from plan cache.
- // Admin hints affect the planning process directly. If there were
- // plans generated as a result of applying index filter, these need to be
- // invalidated. This allows the planner to re-populate the plan cache with
- // non-filtered indexed solutions next time the query is run.
- // Resolve plan cache key from (query, sort, projection) in query settings entry.
- // Concurrency note: There's no harm in removing plan cache entries one at at time.
- // Only way that PlanCache::remove() can fail is when the query shape has been removed from
- // the cache by some other means (re-index, collection info reset, ...). This is OK since
- // that's the intended effect of calling the remove() function with the key from the hint entry.
- for (vector<AllowedIndexEntry*>::const_iterator i = entries.begin();
- i != entries.end(); ++i) {
- AllowedIndexEntry* entry = *i;
- invariant(entry);
-
- // Create canonical query.
- CanonicalQuery* cqRaw;
- Status result = CanonicalQuery::canonicalize(
- ns, entry->query, entry->sort, entry->projection, &cqRaw, whereCallback);
- invariant(result.isOK());
- unique_ptr<CanonicalQuery> cq(cqRaw);
-
- // Remove plan cache entry.
- planCache->remove(*cq);
+    // Format of BSON result:
+    //
+    // {
+    //     filters: [
+    //         {
+    //             query: <query>,
+    //             sort: <sort>,
+    //             projection: <projection>,
+    //             indexes: [<index1>, <index2>, <index3>, ...]
+    //         }
+    //     ]
+    // }
+ BSONArrayBuilder hintsBuilder(bob->subarrayStart("filters"));
+ OwnedPointerVector<AllowedIndexEntry> entries;
+ entries.mutableVector() = querySettings.getAllAllowedIndices();
+ for (vector<AllowedIndexEntry*>::const_iterator i = entries.begin(); i != entries.end(); ++i) {
+ AllowedIndexEntry* entry = *i;
+ invariant(entry);
+
+ BSONObjBuilder hintBob(hintsBuilder.subobjStart());
+ hintBob.append("query", entry->query);
+ hintBob.append("sort", entry->sort);
+ hintBob.append("projection", entry->projection);
+ BSONArrayBuilder indexesBuilder(hintBob.subarrayStart("indexes"));
+ for (vector<BSONObj>::const_iterator j = entry->indexKeyPatterns.begin();
+ j != entry->indexKeyPatterns.end();
+ ++j) {
+ const BSONObj& index = *j;
+ indexesBuilder.append(index);
}
-
+ indexesBuilder.doneFast();
+ }
+ hintsBuilder.doneFast();
+ return Status::OK();
+}
+
+ClearFilters::ClearFilters()
+ : IndexFilterCommand("planCacheClearFilters",
+ "Clears index filter for a single query shape or, "
+ "if the query shape is omitted, all filters for the collection.") {}
+
+Status ClearFilters::runIndexFilterCommand(OperationContext* txn,
+ const std::string& ns,
+ BSONObj& cmdObj,
+ BSONObjBuilder* bob) {
+    // This is a read lock. The query settings object is owned by the collection.
+ AutoGetCollectionForRead ctx(txn, ns);
+
+ QuerySettings* querySettings;
+ PlanCache* planCache;
+ Status status =
+ getQuerySettingsAndPlanCache(txn, ctx.getCollection(), ns, &querySettings, &planCache);
+ if (!status.isOK()) {
+ // No collection - do nothing.
return Status::OK();
}
-
- SetFilter::SetFilter() : IndexFilterCommand("planCacheSetFilter",
- "Sets index filter for a query shape. Overrides existing filter.") { }
-
- Status SetFilter::runIndexFilterCommand(OperationContext* txn,
- const std::string& ns,
- BSONObj& cmdObj,
- BSONObjBuilder* bob) {
- // This is a read lock. The query settings is owned by the collection.
- const NamespaceString nss(ns);
- AutoGetCollectionForRead ctx(txn, nss);
-
- QuerySettings* querySettings;
- PlanCache* planCache;
- Status status =
- getQuerySettingsAndPlanCache(txn, ctx.getCollection(), ns, &querySettings, &planCache);
+ return clear(txn, querySettings, planCache, ns, cmdObj);
+}
+
+// static
+Status ClearFilters::clear(OperationContext* txn,
+ QuerySettings* querySettings,
+ PlanCache* planCache,
+ const std::string& ns,
+ const BSONObj& cmdObj) {
+ invariant(querySettings);
+
+ // According to the specification, the planCacheClearFilters command runs in two modes:
+ // - clear all hints; or
+    // - clear hints for a single query shape when a query shape is described in the
+ // command arguments.
+ if (cmdObj.hasField("query")) {
+ CanonicalQuery* cqRaw;
+ Status status = PlanCacheCommand::canonicalize(txn, ns, cmdObj, &cqRaw);
if (!status.isOK()) {
return status;
}
- return set(txn, querySettings, planCache, ns, cmdObj);
+
+ unique_ptr<CanonicalQuery> cq(cqRaw);
+ querySettings->removeAllowedIndices(planCache->computeKey(*cq));
+
+ // Remove entry from plan cache
+ planCache->remove(*cq);
+ return Status::OK();
}
- // static
- Status SetFilter::set(OperationContext* txn,
- QuerySettings* querySettings,
- PlanCache* planCache,
- const string& ns,
- const BSONObj& cmdObj) {
- // indexes - required
- BSONElement indexesElt = cmdObj.getField("indexes");
- if (indexesElt.eoo()) {
- return Status(ErrorCodes::BadValue, "required field indexes missing");
- }
- if (indexesElt.type() != mongo::Array) {
- return Status(ErrorCodes::BadValue, "required field indexes must be an array");
- }
- vector<BSONElement> indexesEltArray = indexesElt.Array();
- if (indexesEltArray.empty()) {
- return Status(ErrorCodes::BadValue,
- "required field indexes must contain at least one index");
- }
- vector<BSONObj> indexes;
- for (vector<BSONElement>::const_iterator i = indexesEltArray.begin();
- i != indexesEltArray.end(); ++i) {
- const BSONElement& elt = *i;
- if (!elt.isABSONObj()) {
- return Status(ErrorCodes::BadValue, "each item in indexes must be an object");
- }
- BSONObj obj = elt.Obj();
- if (obj.isEmpty()) {
- return Status(ErrorCodes::BadValue, "index specification cannot be empty");
- }
- indexes.push_back(obj.getOwned());
- }
+    // If a query is not provided, make sure sort and projection are not in the arguments.
+    // We do not want to clear the entire cache inadvertently when the user
+    // forgot to provide a value for "query".
+ if (cmdObj.hasField("sort") || cmdObj.hasField("projection")) {
+ return Status(ErrorCodes::BadValue, "sort or projection provided without query");
+ }
+ // Get entries from query settings. We need to remove corresponding entries from the plan
+ // cache shortly.
+ OwnedPointerVector<AllowedIndexEntry> entries;
+ entries.mutableVector() = querySettings->getAllAllowedIndices();
+
+ // OK to proceed with clearing entire cache.
+ querySettings->clearAllowedIndices();
+
+ const NamespaceString nss(ns);
+ const WhereCallbackReal whereCallback(txn, nss.db());
+
+ // Remove corresponding entries from plan cache.
+ // Admin hints affect the planning process directly. If there were
+ // plans generated as a result of applying index filter, these need to be
+ // invalidated. This allows the planner to re-populate the plan cache with
+ // non-filtered indexed solutions next time the query is run.
+ // Resolve plan cache key from (query, sort, projection) in query settings entry.
+    // Concurrency note: There's no harm in removing plan cache entries one at a time.
+    // The only way that PlanCache::remove() can fail is when the query shape has been removed from
+    // the cache by some other means (re-index, collection info reset, ...). This is OK since
+    // that's the intended effect of calling the remove() function with the key from the hint entry.
+ for (vector<AllowedIndexEntry*>::const_iterator i = entries.begin(); i != entries.end(); ++i) {
+ AllowedIndexEntry* entry = *i;
+ invariant(entry);
+
+ // Create canonical query.
CanonicalQuery* cqRaw;
- Status status = PlanCacheCommand::canonicalize(txn, ns, cmdObj, &cqRaw);
- if (!status.isOK()) {
- return status;
- }
+ Status result = CanonicalQuery::canonicalize(
+ ns, entry->query, entry->sort, entry->projection, &cqRaw, whereCallback);
+ invariant(result.isOK());
unique_ptr<CanonicalQuery> cq(cqRaw);
- // Add allowed indices to query settings, overriding any previous entries.
- querySettings->setAllowedIndices(*cq, planCache->computeKey(*cq), indexes);
-
- // Remove entry from plan cache.
+ // Remove plan cache entry.
planCache->remove(*cq);
+ }
- return Status::OK();
+ return Status::OK();
+}
+
+SetFilter::SetFilter()
+ : IndexFilterCommand("planCacheSetFilter",
+ "Sets index filter for a query shape. Overrides existing filter.") {}
+
+Status SetFilter::runIndexFilterCommand(OperationContext* txn,
+ const std::string& ns,
+ BSONObj& cmdObj,
+ BSONObjBuilder* bob) {
+    // This is a read lock. The query settings object is owned by the collection.
+ const NamespaceString nss(ns);
+ AutoGetCollectionForRead ctx(txn, nss);
+
+ QuerySettings* querySettings;
+ PlanCache* planCache;
+ Status status =
+ getQuerySettingsAndPlanCache(txn, ctx.getCollection(), ns, &querySettings, &planCache);
+ if (!status.isOK()) {
+ return status;
+ }
+ return set(txn, querySettings, planCache, ns, cmdObj);
+}
+
+// static
+Status SetFilter::set(OperationContext* txn,
+ QuerySettings* querySettings,
+ PlanCache* planCache,
+ const string& ns,
+ const BSONObj& cmdObj) {
+ // indexes - required
+ BSONElement indexesElt = cmdObj.getField("indexes");
+ if (indexesElt.eoo()) {
+ return Status(ErrorCodes::BadValue, "required field indexes missing");
+ }
+ if (indexesElt.type() != mongo::Array) {
+ return Status(ErrorCodes::BadValue, "required field indexes must be an array");
}
+ vector<BSONElement> indexesEltArray = indexesElt.Array();
+ if (indexesEltArray.empty()) {
+ return Status(ErrorCodes::BadValue,
+ "required field indexes must contain at least one index");
+ }
+ vector<BSONObj> indexes;
+ for (vector<BSONElement>::const_iterator i = indexesEltArray.begin();
+ i != indexesEltArray.end();
+ ++i) {
+ const BSONElement& elt = *i;
+ if (!elt.isABSONObj()) {
+ return Status(ErrorCodes::BadValue, "each item in indexes must be an object");
+ }
+ BSONObj obj = elt.Obj();
+ if (obj.isEmpty()) {
+ return Status(ErrorCodes::BadValue, "index specification cannot be empty");
+ }
+ indexes.push_back(obj.getOwned());
+ }
+
+ CanonicalQuery* cqRaw;
+ Status status = PlanCacheCommand::canonicalize(txn, ns, cmdObj, &cqRaw);
+ if (!status.isOK()) {
+ return status;
+ }
+ unique_ptr<CanonicalQuery> cq(cqRaw);
+
+ // Add allowed indices to query settings, overriding any previous entries.
+ querySettings->setAllowedIndices(*cq, planCache->computeKey(*cq), indexes);
+
+ // Remove entry from plan cache.
+ planCache->remove(*cq);
+
+ return Status::OK();
+}
-} // namespace mongo
+} // namespace mongo
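
For orientation, a minimal sketch of how the static helpers above compose, modeled on the idioms in index_filter_commands_test.cpp further down. Includes are elided; the types and calls (QuerySettings, PlanCache, OperationContextNoop, fromjson(), uassertStatusOK()) are used exactly as in the surrounding code, but the snippet itself is illustrative and not part of this change.

// Sketch only: install, list, then clear an index filter for one query shape.
void exampleIndexFilterRoundTrip() {
    QuerySettings querySettings;
    PlanCache planCache;
    OperationContextNoop txn;
    const char* ns = "test.t";

    // Restrict the {a: 1} query shape to the {a: 1} index.
    uassertStatusOK(SetFilter::set(
        &txn, &querySettings, &planCache, ns, fromjson("{query: {a: 1}, indexes: [{a: 1}]}")));

    // List the filters currently in effect; the result carries a "filters" array.
    BSONObjBuilder bob;
    uassertStatusOK(ListFilters::list(querySettings, &bob));
    BSONObj filters = bob.obj();

    // Clear the filter for that shape; the matching plan cache entry is evicted too.
    uassertStatusOK(ClearFilters::clear(
        &txn, &querySettings, &planCache, ns, fromjson("{query: {a: 1}}")));
}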
diff --git a/src/mongo/db/commands/index_filter_commands.h b/src/mongo/db/commands/index_filter_commands.h
index 1ed6fa506c0..9b2815dba70 100644
--- a/src/mongo/db/commands/index_filter_commands.h
+++ b/src/mongo/db/commands/index_filter_commands.h
@@ -34,153 +34,153 @@
namespace mongo {
+/**
+ * DB commands for index filters.
+ * Index filter commands operate on a data structure in the collection
+ * info cache that is separate from the plan cache.
+ * Users still think of index filter commands as part of the plan cache functionality,
+ * so the command name prefix is still "planCache".
+ *
+ * These are in a header to facilitate unit testing. See index_filter_commands_test.cpp.
+ */
+
+/**
+ * IndexFilterCommand
+ * Defines common attributes for all index filter related commands
+ * such as slaveOk.
+ */
+class IndexFilterCommand : public Command {
+public:
+ IndexFilterCommand(const std::string& name, const std::string& helpText);
+
/**
- * DB commands for index filters.
- * Index filter commands work on a different data structure in the collection
- * info cache from the plan cache.
- * The user still thinks of index filter commands as part of the plan cache functionality
- * so the command name prefix is still "planCache".
+ * Entry point from command subsystem.
+ * Implementation provides standardization of error handling
+ * such as adding error code and message to BSON result.
*
- * These are in a header to facilitate unit testing. See index_filter_commands_test.cpp.
+ * Do not override in derived classes.
+     * Override runIndexFilterCommand instead to
+     * implement index filter command functionality.
*/
+ bool run(OperationContext* txn,
+ const std::string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result);
+
+ virtual bool isWriteCommandForConfigServer() const;
+
+ virtual bool slaveOk() const;
+
+ virtual bool slaveOverrideOk() const;
+
+ virtual void help(std::stringstream& ss) const;
+
/**
- * IndexFilterCommand
- * Defines common attributes for all index filter related commands
- * such as slaveOk.
+ * One action type defined for index filter commands:
+ * - planCacheIndexFilter
*/
- class IndexFilterCommand : public Command {
- public:
- IndexFilterCommand(const std::string& name, const std::string& helpText);
-
- /**
- * Entry point from command subsystem.
- * Implementation provides standardization of error handling
- * such as adding error code and message to BSON result.
- *
- * Do not override in derived classes.
- * Override runPlanCacheCommands instead to
- * implement plan cache command functionality.
- */
-
- bool run(OperationContext* txn,
- const std::string& dbname,
- BSONObj& cmdObj,
- int options,
- std::string& errmsg,
- BSONObjBuilder& result);
-
- virtual bool isWriteCommandForConfigServer() const;
-
- virtual bool slaveOk() const;
-
- virtual bool slaveOverrideOk() const;
-
- virtual void help(std::stringstream& ss) const;
-
- /**
- * One action type defined for index filter commands:
- * - planCacheIndexFilter
- */
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj);
-
- /**
- * Subset of command arguments used by index filter commands
- * Override to provide command functionality.
- * Should contain just enough logic to invoke run*Command() function
- * in query_settings.h
- */
- virtual Status runIndexFilterCommand(OperationContext* txn,
- const std::string& ns,
- BSONObj& cmdObj,
- BSONObjBuilder* bob) = 0;
-
- private:
- std::string helpText;
- };
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj);
/**
- * ListFilters
- *
- * { planCacheListFilters: <collection> }
- *
+     * Processes the subset of command arguments used by index filter commands.
+     * Override to provide command functionality.
+     * Should contain just enough logic to invoke the run*Command() function
+     * in query_settings.h.
+ */
+ virtual Status runIndexFilterCommand(OperationContext* txn,
+ const std::string& ns,
+ BSONObj& cmdObj,
+ BSONObjBuilder* bob) = 0;
+
+private:
+ std::string helpText;
+};
+
+/**
+ * ListFilters
+ *
+ * { planCacheListFilters: <collection> }
+ *
+ */
+class ListFilters : public IndexFilterCommand {
+public:
+ ListFilters();
+
+ virtual Status runIndexFilterCommand(OperationContext* txn,
+ const std::string& ns,
+ BSONObj& cmdObj,
+ BSONObjBuilder* bob);
+
+ /**
+ * Looks up index filters from collection's query settings.
+ * Inserts index filters into BSON builder.
*/
- class ListFilters : public IndexFilterCommand {
- public:
- ListFilters();
-
- virtual Status runIndexFilterCommand(OperationContext* txn,
- const std::string& ns,
- BSONObj& cmdObj,
- BSONObjBuilder* bob);
-
- /**
- * Looks up index filters from collection's query settings.
- * Inserts index filters into BSON builder.
- */
- static Status list(const QuerySettings& querySettings, BSONObjBuilder* bob);
- };
+ static Status list(const QuerySettings& querySettings, BSONObjBuilder* bob);
+};
+
+/**
+ * ClearFilters
+ *
+ * { planCacheClearFilters: <collection>, query: <query>, sort: <sort>, projection: <projection> }
+ *
+ */
+class ClearFilters : public IndexFilterCommand {
+public:
+ ClearFilters();
+
+ virtual Status runIndexFilterCommand(OperationContext* txn,
+ const std::string& ns,
+ BSONObj& cmdObj,
+ BSONObjBuilder* bob);
/**
- * ClearFilters
- *
- * { planCacheClearFilters: <collection>, query: <query>, sort: <sort>, projection: <projection> }
- *
+     * If a query shape is provided, clears the index filter for that query shape.
+     * Otherwise, clears all of the collection's filters.
+     * The namespace argument ns is ignored if we are clearing the entire cache.
+     * Removes corresponding entries from the plan cache.
*/
- class ClearFilters : public IndexFilterCommand {
- public:
- ClearFilters();
-
- virtual Status runIndexFilterCommand(OperationContext* txn,
- const std::string& ns,
- BSONObj& cmdObj,
- BSONObjBuilder* bob);
-
- /**
- * If query shape is provided, clears index filter for a query.
- * Otherwise, clears collection's filters.
- * Namespace argument ns is ignored if we are clearing the entire cache.
- * Removes corresponding entries from plan cache.
- */
- static Status clear(OperationContext* txn,
- QuerySettings* querySettings,
- PlanCache* planCache,
- const std::string& ns,
- const BSONObj& cmdObj);
- };
+ static Status clear(OperationContext* txn,
+ QuerySettings* querySettings,
+ PlanCache* planCache,
+ const std::string& ns,
+ const BSONObj& cmdObj);
+};
+
+/**
+ * SetFilter
+ *
+ * {
+ * planCacheSetFilter: <collection>,
+ * query: <query>,
+ * sort: <sort>,
+ * projection: <projection>,
+ * indexes: [ <index1>, <index2>, <index3>, ... ]
+ * }
+ *
+ */
+class SetFilter : public IndexFilterCommand {
+public:
+ SetFilter();
+
+ virtual Status runIndexFilterCommand(OperationContext* txn,
+ const std::string& ns,
+ BSONObj& cmdObj,
+ BSONObjBuilder* bob);
/**
- * SetFilter
- *
- * {
- * planCacheSetFilter: <collection>,
- * query: <query>,
- * sort: <sort>,
- * projection: <projection>,
- * indexes: [ <index1>, <index2>, <index3>, ... ]
- * }
- *
+ * Sets index filter for a query shape.
+ * Removes entry for query shape from plan cache.
*/
- class SetFilter : public IndexFilterCommand {
- public:
- SetFilter();
-
- virtual Status runIndexFilterCommand(OperationContext* txn,
- const std::string& ns,
- BSONObj& cmdObj,
- BSONObjBuilder* bob);
-
- /**
- * Sets index filter for a query shape.
- * Removes entry for query shape from plan cache.
- */
- static Status set(OperationContext* txn,
- QuerySettings* querySettings,
- PlanCache* planCache,
- const std::string& ns,
- const BSONObj& cmdObj);
- };
+ static Status set(OperationContext* txn,
+ QuerySettings* querySettings,
+ PlanCache* planCache,
+ const std::string& ns,
+ const BSONObj& cmdObj);
+};
} // namespace mongo
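
A hedged illustration of the three command documents described in the class comments above, built with fromjson() as elsewhere in this code. The collection name and query shapes are placeholders, and the snippet is illustrative rather than part of the change.

// Sketch only: example command documents for the three index filter commands.
BSONObj listCmd = fromjson("{planCacheListFilters: 'coll'}");
BSONObj clearOneCmd = fromjson("{planCacheClearFilters: 'coll', query: {a: 1}}");
BSONObj clearAllCmd = fromjson("{planCacheClearFilters: 'coll'}");
BSONObj setCmd = fromjson(
    "{planCacheSetFilter: 'coll',"
    " query: {a: 1, b: 1}, sort: {a: -1}, projection: {_id: 0, a: 1},"
    " indexes: [{a: 1}, {a: 1, b: 1}]}");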
diff --git a/src/mongo/db/commands/index_filter_commands_test.cpp b/src/mongo/db/commands/index_filter_commands_test.cpp
index 108c7d85bbb..b23b3d34ed8 100644
--- a/src/mongo/db/commands/index_filter_commands_test.cpp
+++ b/src/mongo/db/commands/index_filter_commands_test.cpp
@@ -43,287 +43,313 @@ using namespace mongo;
namespace {
- using std::string;
- using std::unique_ptr;
- using std::vector;
-
- static const char* ns = "test.t";
-
- /**
- * Utility function to get list of index filters from the query settings.
- */
- vector<BSONObj> getFilters(const QuerySettings& querySettings) {
- BSONObjBuilder bob;
- ASSERT_OK(ListFilters::list(querySettings, &bob));
- BSONObj resultObj = bob.obj();
- BSONElement filtersElt = resultObj.getField("filters");
- ASSERT_EQUALS(filtersElt.type(), mongo::Array);
- vector<BSONElement> filtersEltArray = filtersElt.Array();
- vector<BSONObj> filters;
- for (vector<BSONElement>::const_iterator i = filtersEltArray.begin();
- i != filtersEltArray.end(); ++i) {
- const BSONElement& elt = *i;
-
- ASSERT_TRUE(elt.isABSONObj());
- BSONObj obj = elt.Obj();
-
- // Check required fields.
- // query
- BSONElement queryElt = obj.getField("query");
- ASSERT_TRUE(queryElt.isABSONObj());
-
- // sort
- BSONElement sortElt = obj.getField("sort");
- ASSERT_TRUE(sortElt.isABSONObj());
-
- // projection
- BSONElement projectionElt = obj.getField("projection");
- ASSERT_TRUE(projectionElt.isABSONObj());
-
- // indexes
- BSONElement indexesElt = obj.getField("indexes");
- ASSERT_EQUALS(indexesElt.type(), mongo::Array);
-
- // All fields OK. Append to vector.
- filters.push_back(obj.getOwned());
- }
+using std::string;
+using std::unique_ptr;
+using std::vector;
- return filters;
- }
+static const char* ns = "test.t";
- /**
- * Utility function to create a PlanRankingDecision
- */
- PlanRankingDecision* createDecision(size_t numPlans) {
- unique_ptr<PlanRankingDecision> why(new PlanRankingDecision());
- for (size_t i = 0; i < numPlans; ++i) {
- CommonStats common("COLLSCAN");
- unique_ptr<PlanStageStats> stats(new PlanStageStats(common, STAGE_COLLSCAN));
- stats->specific.reset(new CollectionScanStats());
- why->stats.mutableVector().push_back(stats.release());
- why->scores.push_back(0U);
- why->candidateOrder.push_back(i);
- }
- return why.release();
+/**
+ * Utility function to get list of index filters from the query settings.
+ */
+vector<BSONObj> getFilters(const QuerySettings& querySettings) {
+ BSONObjBuilder bob;
+ ASSERT_OK(ListFilters::list(querySettings, &bob));
+ BSONObj resultObj = bob.obj();
+ BSONElement filtersElt = resultObj.getField("filters");
+ ASSERT_EQUALS(filtersElt.type(), mongo::Array);
+ vector<BSONElement> filtersEltArray = filtersElt.Array();
+ vector<BSONObj> filters;
+ for (vector<BSONElement>::const_iterator i = filtersEltArray.begin();
+ i != filtersEltArray.end();
+ ++i) {
+ const BSONElement& elt = *i;
+
+ ASSERT_TRUE(elt.isABSONObj());
+ BSONObj obj = elt.Obj();
+
+ // Check required fields.
+ // query
+ BSONElement queryElt = obj.getField("query");
+ ASSERT_TRUE(queryElt.isABSONObj());
+
+ // sort
+ BSONElement sortElt = obj.getField("sort");
+ ASSERT_TRUE(sortElt.isABSONObj());
+
+ // projection
+ BSONElement projectionElt = obj.getField("projection");
+ ASSERT_TRUE(projectionElt.isABSONObj());
+
+ // indexes
+ BSONElement indexesElt = obj.getField("indexes");
+ ASSERT_EQUALS(indexesElt.type(), mongo::Array);
+
+ // All fields OK. Append to vector.
+ filters.push_back(obj.getOwned());
}
- /**
- * Injects an entry into plan cache for query shape.
- */
- void addQueryShapeToPlanCache(PlanCache* planCache, const char* queryStr, const char* sortStr,
- const char* projectionStr) {
- BSONObj queryObj = fromjson(queryStr);
- BSONObj sortObj = fromjson(sortStr);
- BSONObj projectionObj = fromjson(projectionStr);
-
- // Create canonical query.
- CanonicalQuery* cqRaw;
- ASSERT_OK(CanonicalQuery::canonicalize(ns, queryObj, sortObj, projectionObj, &cqRaw));
- unique_ptr<CanonicalQuery> cq(cqRaw);
-
- QuerySolution qs;
- qs.cacheData.reset(new SolutionCacheData());
- qs.cacheData->tree.reset(new PlanCacheIndexTree());
- std::vector<QuerySolution*> solns;
- solns.push_back(&qs);
- ASSERT_OK(planCache->add(*cq, solns, createDecision(1U)));
+ return filters;
+}
+
+/**
+ * Utility function to create a PlanRankingDecision
+ */
+PlanRankingDecision* createDecision(size_t numPlans) {
+ unique_ptr<PlanRankingDecision> why(new PlanRankingDecision());
+ for (size_t i = 0; i < numPlans; ++i) {
+ CommonStats common("COLLSCAN");
+ unique_ptr<PlanStageStats> stats(new PlanStageStats(common, STAGE_COLLSCAN));
+ stats->specific.reset(new CollectionScanStats());
+ why->stats.mutableVector().push_back(stats.release());
+ why->scores.push_back(0U);
+ why->candidateOrder.push_back(i);
}
+ return why.release();
+}
+
+/**
+ * Injects an entry into plan cache for query shape.
+ */
+void addQueryShapeToPlanCache(PlanCache* planCache,
+ const char* queryStr,
+ const char* sortStr,
+ const char* projectionStr) {
+ BSONObj queryObj = fromjson(queryStr);
+ BSONObj sortObj = fromjson(sortStr);
+ BSONObj projectionObj = fromjson(projectionStr);
+
+ // Create canonical query.
+ CanonicalQuery* cqRaw;
+ ASSERT_OK(CanonicalQuery::canonicalize(ns, queryObj, sortObj, projectionObj, &cqRaw));
+ unique_ptr<CanonicalQuery> cq(cqRaw);
+
+ QuerySolution qs;
+ qs.cacheData.reset(new SolutionCacheData());
+ qs.cacheData->tree.reset(new PlanCacheIndexTree());
+ std::vector<QuerySolution*> solns;
+ solns.push_back(&qs);
+ ASSERT_OK(planCache->add(*cq, solns, createDecision(1U)));
+}
- /**
- * Checks if plan cache contains query shape.
- */
- bool planCacheContains(const PlanCache& planCache, const char* queryStr, const char* sortStr,
- const char* projectionStr) {
- BSONObj queryObj = fromjson(queryStr);
- BSONObj sortObj = fromjson(sortStr);
- BSONObj projectionObj = fromjson(projectionStr);
-
- // Create canonical query.
- CanonicalQuery* cqRaw;
- ASSERT_OK(CanonicalQuery::canonicalize(ns, queryObj, sortObj, projectionObj, &cqRaw));
- unique_ptr<CanonicalQuery> cq(cqRaw);
-
- // Retrieve cache entries from plan cache.
- vector<PlanCacheEntry*> entries = planCache.getAllEntries();
-
- // Search keys.
- bool found = false;
- for (vector<PlanCacheEntry*>::const_iterator i = entries.begin(); i != entries.end(); i++) {
- PlanCacheEntry* entry = *i;
-
- // Canonicalizing query shape in cache entry to get cache key.
- // Alternatively, we could add key to PlanCacheEntry but that would be used in one place only.
- ASSERT_OK(CanonicalQuery::canonicalize(ns, entry->query, entry->sort,
- entry->projection, &cqRaw));
- unique_ptr<CanonicalQuery> currentQuery(cqRaw);
-
- if (planCache.computeKey(*currentQuery) == planCache.computeKey(*cq)) {
- found = true;
- }
- // Release resources for cache entry after extracting key.
- delete entry;
+/**
+ * Checks if plan cache contains query shape.
+ */
+bool planCacheContains(const PlanCache& planCache,
+ const char* queryStr,
+ const char* sortStr,
+ const char* projectionStr) {
+ BSONObj queryObj = fromjson(queryStr);
+ BSONObj sortObj = fromjson(sortStr);
+ BSONObj projectionObj = fromjson(projectionStr);
+
+ // Create canonical query.
+ CanonicalQuery* cqRaw;
+ ASSERT_OK(CanonicalQuery::canonicalize(ns, queryObj, sortObj, projectionObj, &cqRaw));
+ unique_ptr<CanonicalQuery> cq(cqRaw);
+
+ // Retrieve cache entries from plan cache.
+ vector<PlanCacheEntry*> entries = planCache.getAllEntries();
+
+ // Search keys.
+ bool found = false;
+ for (vector<PlanCacheEntry*>::const_iterator i = entries.begin(); i != entries.end(); i++) {
+ PlanCacheEntry* entry = *i;
+
+ // Canonicalizing query shape in cache entry to get cache key.
+        // Alternatively, we could add the key to PlanCacheEntry, but it would be used in one place only.
+ ASSERT_OK(
+ CanonicalQuery::canonicalize(ns, entry->query, entry->sort, entry->projection, &cqRaw));
+ unique_ptr<CanonicalQuery> currentQuery(cqRaw);
+
+ if (planCache.computeKey(*currentQuery) == planCache.computeKey(*cq)) {
+ found = true;
}
- return found;
+ // Release resources for cache entry after extracting key.
+ delete entry;
}
+ return found;
+}
- /**
- * Tests for ListFilters
- */
+/**
+ * Tests for ListFilters
+ */
- TEST(IndexFilterCommandsTest, ListFiltersEmpty) {
- QuerySettings empty;
- vector<BSONObj> filters = getFilters(empty);
- ASSERT_TRUE(filters.empty());
- }
+TEST(IndexFilterCommandsTest, ListFiltersEmpty) {
+ QuerySettings empty;
+ vector<BSONObj> filters = getFilters(empty);
+ ASSERT_TRUE(filters.empty());
+}
- /**
- * Tests for ClearFilters
- */
-
- TEST(IndexFilterCommandsTest, ClearFiltersInvalidParameter) {
- QuerySettings empty;
- PlanCache planCache;
- OperationContextNoop txn;
-
- // If present, query has to be an object.
- ASSERT_NOT_OK(ClearFilters::clear(&txn, &empty, &planCache, ns, fromjson("{query: 1234}")));
- // If present, sort must be an object.
- ASSERT_NOT_OK(ClearFilters::clear(&txn, &empty, &planCache, ns,
- fromjson("{query: {a: 1}, sort: 1234}")));
- // If present, projection must be an object.
- ASSERT_NOT_OK(ClearFilters::clear(&txn, &empty, &planCache, ns,
- fromjson("{query: {a: 1}, projection: 1234}")));
- // Query must pass canonicalization.
- ASSERT_NOT_OK(ClearFilters::clear(&txn, &empty, &planCache, ns,
- fromjson("{query: {a: {$no_such_op: 1}}}")));
- // Sort present without query is an error.
- ASSERT_NOT_OK(ClearFilters::clear(&txn, &empty, &planCache, ns, fromjson("{sort: {a: 1}}")));
- // Projection present without query is an error.
- ASSERT_NOT_OK(ClearFilters::clear(&txn, &empty, &planCache, ns,
- fromjson("{projection: {_id: 0, a: 1}}")));
- }
+/**
+ * Tests for ClearFilters
+ */
- TEST(IndexFilterCommandsTest, ClearNonexistentHint) {
- QuerySettings querySettings;
- PlanCache planCache;
- OperationContextNoop txn;
-
- ASSERT_OK(SetFilter::set(&txn, &querySettings, &planCache, ns,
- fromjson("{query: {a: 1}, indexes: [{a: 1}]}")));
- vector<BSONObj> filters = getFilters(querySettings);
- ASSERT_EQUALS(filters.size(), 1U);
-
- // Clear nonexistent hint.
- // Command should succeed and cache should remain unchanged.
- ASSERT_OK(ClearFilters::clear(&txn, &querySettings, &planCache, ns, fromjson("{query: {b: 1}}")));
- filters = getFilters(querySettings);
- ASSERT_EQUALS(filters.size(), 1U);
- }
+TEST(IndexFilterCommandsTest, ClearFiltersInvalidParameter) {
+ QuerySettings empty;
+ PlanCache planCache;
+ OperationContextNoop txn;
+
+ // If present, query has to be an object.
+ ASSERT_NOT_OK(ClearFilters::clear(&txn, &empty, &planCache, ns, fromjson("{query: 1234}")));
+ // If present, sort must be an object.
+ ASSERT_NOT_OK(
+ ClearFilters::clear(&txn, &empty, &planCache, ns, fromjson("{query: {a: 1}, sort: 1234}")));
+ // If present, projection must be an object.
+ ASSERT_NOT_OK(ClearFilters::clear(
+ &txn, &empty, &planCache, ns, fromjson("{query: {a: 1}, projection: 1234}")));
+ // Query must pass canonicalization.
+ ASSERT_NOT_OK(ClearFilters::clear(
+ &txn, &empty, &planCache, ns, fromjson("{query: {a: {$no_such_op: 1}}}")));
+ // Sort present without query is an error.
+ ASSERT_NOT_OK(ClearFilters::clear(&txn, &empty, &planCache, ns, fromjson("{sort: {a: 1}}")));
+ // Projection present without query is an error.
+ ASSERT_NOT_OK(ClearFilters::clear(
+ &txn, &empty, &planCache, ns, fromjson("{projection: {_id: 0, a: 1}}")));
+}
+
+TEST(IndexFilterCommandsTest, ClearNonexistentHint) {
+ QuerySettings querySettings;
+ PlanCache planCache;
+ OperationContextNoop txn;
+
+ ASSERT_OK(SetFilter::set(
+ &txn, &querySettings, &planCache, ns, fromjson("{query: {a: 1}, indexes: [{a: 1}]}")));
+ vector<BSONObj> filters = getFilters(querySettings);
+ ASSERT_EQUALS(filters.size(), 1U);
+
+ // Clear nonexistent hint.
+ // Command should succeed and cache should remain unchanged.
+ ASSERT_OK(
+ ClearFilters::clear(&txn, &querySettings, &planCache, ns, fromjson("{query: {b: 1}}")));
+ filters = getFilters(querySettings);
+ ASSERT_EQUALS(filters.size(), 1U);
+}
- /**
- * Tests for SetFilter
- */
-
- TEST(IndexFilterCommandsTest, SetFilterInvalidParameter) {
- QuerySettings empty;
- PlanCache planCache;
- OperationContextNoop txn;
-
- ASSERT_NOT_OK(SetFilter::set(&txn, &empty, &planCache, ns, fromjson("{}")));
- // Missing required query field.
- ASSERT_NOT_OK(SetFilter::set(&txn, &empty, &planCache, ns, fromjson("{indexes: [{a: 1}]}")));
- // Missing required indexes field.
- ASSERT_NOT_OK(SetFilter::set(&txn, &empty, &planCache, ns, fromjson("{query: {a: 1}}")));
- // Query has to be an object.
- ASSERT_NOT_OK(SetFilter::set(&txn, &empty, &planCache, ns,
- fromjson("{query: 1234, indexes: [{a: 1}, {b: 1}]}")));
- // Indexes field has to be an array.
- ASSERT_NOT_OK(SetFilter::set(&txn, &empty, &planCache, ns,
- fromjson("{query: {a: 1}, indexes: 1234}")));
- // Array indexes field cannot empty.
- ASSERT_NOT_OK(SetFilter::set(&txn, &empty, &planCache, ns,
- fromjson("{query: {a: 1}, indexes: []}")));
- // Elements in indexes have to be objects.
- ASSERT_NOT_OK(SetFilter::set(&txn, &empty, &planCache, ns,
- fromjson("{query: {a: 1}, indexes: [{a: 1}, 99]}")));
- // Objects in indexes cannot be empty.
- ASSERT_NOT_OK(SetFilter::set(&txn, &empty, &planCache, ns,
- fromjson("{query: {a: 1}, indexes: [{a: 1}, {}]}")));
- // If present, sort must be an object.
- ASSERT_NOT_OK(SetFilter::set(&txn, &empty, &planCache, ns,
- fromjson("{query: {a: 1}, sort: 1234, indexes: [{a: 1}, {b: 1}]}")));
- // If present, projection must be an object.
- ASSERT_NOT_OK(SetFilter::set(&txn, &empty, &planCache, ns,
- fromjson("{query: {a: 1}, projection: 1234, indexes: [{a: 1}, {b: 1}]}")));
- // Query must pass canonicalization.
- ASSERT_NOT_OK(SetFilter::set(&txn, &empty, &planCache, ns,
- fromjson("{query: {a: {$no_such_op: 1}}, indexes: [{a: 1}, {b: 1}]}")));
- }
+/**
+ * Tests for SetFilter
+ */
- TEST(IndexFilterCommandsTest, SetAndClearFilters) {
- QuerySettings querySettings;
- PlanCache planCache;
- OperationContextNoop txn;
-
- // Inject query shape into plan cache.
- addQueryShapeToPlanCache(&planCache, "{a: 1, b: 1}", "{a: -1}", "{_id: 0, a: 1}");
- ASSERT_TRUE(planCacheContains(planCache, "{a: 1, b: 1}", "{a: -1}", "{_id: 0, a: 1}"));
-
- ASSERT_OK(SetFilter::set(&txn, &querySettings, &planCache, ns,
- fromjson("{query: {a: 1, b: 1}, sort: {a: -1}, projection: {_id: 0, a: 1}, "
- "indexes: [{a: 1}]}")));
- vector<BSONObj> filters = getFilters(querySettings);
- ASSERT_EQUALS(filters.size(), 1U);
-
- // Query shape should not exist in plan cache after hint is updated.
- ASSERT_FALSE(planCacheContains(planCache, "{a: 1, b: 1}", "{a: -1}", "{_id: 0, a: 1}"));
-
- // Fields in filter should match criteria in most recent query settings update.
- ASSERT_EQUALS(filters[0].getObjectField("query"), fromjson("{a: 1, b: 1}"));
- ASSERT_EQUALS(filters[0].getObjectField("sort"), fromjson("{a: -1}"));
- ASSERT_EQUALS(filters[0].getObjectField("projection"), fromjson("{_id: 0, a: 1}"));
-
- // Replacing the hint for the same query shape ({a: 1, b: 1} and {b: 2, a: 3}
- // share same shape) should not change the query settings size.
- ASSERT_OK(SetFilter::set(&txn, &querySettings, &planCache, ns,
- fromjson("{query: {b: 2, a: 3}, sort: {a: -1}, projection: {_id: 0, a: 1}, "
- "indexes: [{a: 1, b: 1}]}")));
- filters = getFilters(querySettings);
- ASSERT_EQUALS(filters.size(), 1U);
-
- // Add hint for different query shape.
- ASSERT_OK(SetFilter::set(&txn, &querySettings, &planCache, ns,
- fromjson("{query: {b: 1}, indexes: [{b: 1}]}")));
- filters = getFilters(querySettings);
- ASSERT_EQUALS(filters.size(), 2U);
-
- // Add hint for 3rd query shape. This is to prepare for ClearHint tests.
- ASSERT_OK(SetFilter::set(&txn, &querySettings, &planCache, ns,
- fromjson("{query: {a: 1}, indexes: [{a: 1}]}")));
- filters = getFilters(querySettings);
- ASSERT_EQUALS(filters.size(), 3U);
-
- // Add 2 entries to plan cache and check plan cache after clearing one/all filters.
- addQueryShapeToPlanCache(&planCache, "{a: 1}", "{}", "{}");
- addQueryShapeToPlanCache(&planCache, "{b: 1}", "{}", "{}");
-
- // Clear single hint.
- ASSERT_OK(ClearFilters::clear(&txn, &querySettings, &planCache, ns,
- fromjson("{query: {a: 1}}")));
- filters = getFilters(querySettings);
- ASSERT_EQUALS(filters.size(), 2U);
-
- // Query shape should not exist in plan cache after cleaing 1 hint.
- ASSERT_FALSE(planCacheContains(planCache, "{a: 1}", "{}", "{}"));
- ASSERT_TRUE(planCacheContains(planCache, "{b: 1}", "{}", "{}"));
-
- // Clear all filters
- ASSERT_OK(ClearFilters::clear(&txn, &querySettings, &planCache, ns, fromjson("{}")));
- filters = getFilters(querySettings);
- ASSERT_TRUE(filters.empty());
-
- // {b: 1} should be gone from plan cache after flushing query settings.
- ASSERT_FALSE(planCacheContains(planCache, "{b: 1}", "{}", "{}"));
- }
+TEST(IndexFilterCommandsTest, SetFilterInvalidParameter) {
+ QuerySettings empty;
+ PlanCache planCache;
+ OperationContextNoop txn;
+
+ ASSERT_NOT_OK(SetFilter::set(&txn, &empty, &planCache, ns, fromjson("{}")));
+ // Missing required query field.
+ ASSERT_NOT_OK(SetFilter::set(&txn, &empty, &planCache, ns, fromjson("{indexes: [{a: 1}]}")));
+ // Missing required indexes field.
+ ASSERT_NOT_OK(SetFilter::set(&txn, &empty, &planCache, ns, fromjson("{query: {a: 1}}")));
+ // Query has to be an object.
+ ASSERT_NOT_OK(SetFilter::set(
+ &txn, &empty, &planCache, ns, fromjson("{query: 1234, indexes: [{a: 1}, {b: 1}]}")));
+ // Indexes field has to be an array.
+ ASSERT_NOT_OK(
+ SetFilter::set(&txn, &empty, &planCache, ns, fromjson("{query: {a: 1}, indexes: 1234}")));
+    // The indexes array field cannot be empty.
+ ASSERT_NOT_OK(
+ SetFilter::set(&txn, &empty, &planCache, ns, fromjson("{query: {a: 1}, indexes: []}")));
+ // Elements in indexes have to be objects.
+ ASSERT_NOT_OK(SetFilter::set(
+ &txn, &empty, &planCache, ns, fromjson("{query: {a: 1}, indexes: [{a: 1}, 99]}")));
+ // Objects in indexes cannot be empty.
+ ASSERT_NOT_OK(SetFilter::set(
+ &txn, &empty, &planCache, ns, fromjson("{query: {a: 1}, indexes: [{a: 1}, {}]}")));
+ // If present, sort must be an object.
+ ASSERT_NOT_OK(
+ SetFilter::set(&txn,
+ &empty,
+ &planCache,
+ ns,
+ fromjson("{query: {a: 1}, sort: 1234, indexes: [{a: 1}, {b: 1}]}")));
+ // If present, projection must be an object.
+ ASSERT_NOT_OK(
+ SetFilter::set(&txn,
+ &empty,
+ &planCache,
+ ns,
+ fromjson("{query: {a: 1}, projection: 1234, indexes: [{a: 1}, {b: 1}]}")));
+ // Query must pass canonicalization.
+ ASSERT_NOT_OK(
+ SetFilter::set(&txn,
+ &empty,
+ &planCache,
+ ns,
+ fromjson("{query: {a: {$no_such_op: 1}}, indexes: [{a: 1}, {b: 1}]}")));
+}
+
+TEST(IndexFilterCommandsTest, SetAndClearFilters) {
+ QuerySettings querySettings;
+ PlanCache planCache;
+ OperationContextNoop txn;
+
+ // Inject query shape into plan cache.
+ addQueryShapeToPlanCache(&planCache, "{a: 1, b: 1}", "{a: -1}", "{_id: 0, a: 1}");
+ ASSERT_TRUE(planCacheContains(planCache, "{a: 1, b: 1}", "{a: -1}", "{_id: 0, a: 1}"));
+
+ ASSERT_OK(SetFilter::set(&txn,
+ &querySettings,
+ &planCache,
+ ns,
+ fromjson(
+ "{query: {a: 1, b: 1}, sort: {a: -1}, projection: {_id: 0, a: 1}, "
+ "indexes: [{a: 1}]}")));
+ vector<BSONObj> filters = getFilters(querySettings);
+ ASSERT_EQUALS(filters.size(), 1U);
+
+ // Query shape should not exist in plan cache after hint is updated.
+ ASSERT_FALSE(planCacheContains(planCache, "{a: 1, b: 1}", "{a: -1}", "{_id: 0, a: 1}"));
+
+ // Fields in filter should match criteria in most recent query settings update.
+ ASSERT_EQUALS(filters[0].getObjectField("query"), fromjson("{a: 1, b: 1}"));
+ ASSERT_EQUALS(filters[0].getObjectField("sort"), fromjson("{a: -1}"));
+ ASSERT_EQUALS(filters[0].getObjectField("projection"), fromjson("{_id: 0, a: 1}"));
+
+ // Replacing the hint for the same query shape ({a: 1, b: 1} and {b: 2, a: 3}
+    // share the same shape) should not change the query settings size.
+ ASSERT_OK(SetFilter::set(&txn,
+ &querySettings,
+ &planCache,
+ ns,
+ fromjson(
+ "{query: {b: 2, a: 3}, sort: {a: -1}, projection: {_id: 0, a: 1}, "
+ "indexes: [{a: 1, b: 1}]}")));
+ filters = getFilters(querySettings);
+ ASSERT_EQUALS(filters.size(), 1U);
+
+ // Add hint for different query shape.
+ ASSERT_OK(SetFilter::set(
+ &txn, &querySettings, &planCache, ns, fromjson("{query: {b: 1}, indexes: [{b: 1}]}")));
+ filters = getFilters(querySettings);
+ ASSERT_EQUALS(filters.size(), 2U);
+
+ // Add hint for 3rd query shape. This is to prepare for ClearHint tests.
+ ASSERT_OK(SetFilter::set(
+ &txn, &querySettings, &planCache, ns, fromjson("{query: {a: 1}, indexes: [{a: 1}]}")));
+ filters = getFilters(querySettings);
+ ASSERT_EQUALS(filters.size(), 3U);
+
+ // Add 2 entries to plan cache and check plan cache after clearing one/all filters.
+ addQueryShapeToPlanCache(&planCache, "{a: 1}", "{}", "{}");
+ addQueryShapeToPlanCache(&planCache, "{b: 1}", "{}", "{}");
+
+ // Clear single hint.
+ ASSERT_OK(
+ ClearFilters::clear(&txn, &querySettings, &planCache, ns, fromjson("{query: {a: 1}}")));
+ filters = getFilters(querySettings);
+ ASSERT_EQUALS(filters.size(), 2U);
+
+    // Query shape should not exist in plan cache after clearing 1 hint.
+ ASSERT_FALSE(planCacheContains(planCache, "{a: 1}", "{}", "{}"));
+ ASSERT_TRUE(planCacheContains(planCache, "{b: 1}", "{}", "{}"));
+
+ // Clear all filters
+ ASSERT_OK(ClearFilters::clear(&txn, &querySettings, &planCache, ns, fromjson("{}")));
+ filters = getFilters(querySettings);
+ ASSERT_TRUE(filters.empty());
+
+ // {b: 1} should be gone from plan cache after flushing query settings.
+ ASSERT_FALSE(planCacheContains(planCache, "{b: 1}", "{}", "{}"));
+}
} // namespace
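
A hypothetical companion test, in the same style as the tests above, making the end-to-end invariant explicit: clearing with an empty command document flushes every filter and evicts the matching plan cache entries. It reuses this file's helpers and is illustrative only, not part of this change.

// Sketch only: reuses ns, addQueryShapeToPlanCache, getFilters and planCacheContains from this file.
TEST(IndexFilterCommandsTest, ClearAllFlushesPlanCache) {
    QuerySettings querySettings;
    PlanCache planCache;
    OperationContextNoop txn;

    // Install a filter, then plant a plan cache entry for the same shape.
    ASSERT_OK(SetFilter::set(
        &txn, &querySettings, &planCache, ns, fromjson("{query: {a: 1}, indexes: [{a: 1}]}")));
    addQueryShapeToPlanCache(&planCache, "{a: 1}", "{}", "{}");

    // An empty command document means "clear everything".
    ASSERT_OK(ClearFilters::clear(&txn, &querySettings, &planCache, ns, fromjson("{}")));
    ASSERT_TRUE(getFilters(querySettings).empty());
    ASSERT_FALSE(planCacheContains(planCache, "{a: 1}", "{}", "{}"));
}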
diff --git a/src/mongo/db/commands/isself.cpp b/src/mongo/db/commands/isself.cpp
index ebec8ae4fdb..91522b4ddb7 100644
--- a/src/mongo/db/commands/isself.cpp
+++ b/src/mongo/db/commands/isself.cpp
@@ -36,36 +36,40 @@
namespace mongo {
- using std::string;
- using std::stringstream;
+using std::string;
+using std::stringstream;
- class IsSelfCommand : public Command {
- public:
- IsSelfCommand() : Command("_isSelf") {}
- virtual bool slaveOk() const { return true; }
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual void help( stringstream &help ) const {
- help << "{ _isSelf : 1 } INTERNAL ONLY";
- }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {} // No auth required
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- result.append( "id" , repl::instanceId );
- return true;
- }
- };
-
- MONGO_INITIALIZER_WITH_PREREQUISITES(RegisterIsSelfCommand, ("GenerateInstanceId"))
- (InitializerContext* context) {
- // Leaked intentionally: a Command registers itself when constructed
- new IsSelfCommand();
- return Status::OK();
+class IsSelfCommand : public Command {
+public:
+ IsSelfCommand() : Command("_isSelf") {}
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual void help(stringstream& help) const {
+ help << "{ _isSelf : 1 } INTERNAL ONLY";
}
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {} // No auth required
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ result.append("id", repl::instanceId);
+ return true;
+ }
+};
+
+MONGO_INITIALIZER_WITH_PREREQUISITES(RegisterIsSelfCommand, ("GenerateInstanceId"))
+(InitializerContext* context) {
+ // Leaked intentionally: a Command registers itself when constructed
+ new IsSelfCommand();
+ return Status::OK();
+}
} // namespace mongo
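
The "leaked intentionally" comment above reflects the standard registration idiom: the Command base-class constructor adds the instance to the global command registry, so the object must outlive the process. A minimal sketch, modeled on IsSelfCommand; the class and command name here are hypothetical.

// Sketch only: a self-registering no-op command.
class ExampleNoopCommand : public Command {
public:
    ExampleNoopCommand() : Command("_exampleNoop") {}  // hypothetical name
    virtual bool slaveOk() const {
        return true;
    }
    virtual bool isWriteCommandForConfigServer() const {
        return false;
    }
    virtual void addRequiredPrivileges(const std::string& dbname,
                                       const BSONObj& cmdObj,
                                       std::vector<Privilege>* out) {}  // No auth required
    bool run(OperationContext* txn,
             const std::string& dbname,
             BSONObj& cmdObj,
             int,
             std::string& errmsg,
             BSONObjBuilder& result) {
        result.append("info", "noop");
        return true;
    }
};

MONGO_INITIALIZER(RegisterExampleNoopCommand)(InitializerContext* context) {
    // Leaked intentionally: a Command registers itself when constructed.
    new ExampleNoopCommand();
    return Status::OK();
}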
diff --git a/src/mongo/db/commands/kill_op.cpp b/src/mongo/db/commands/kill_op.cpp
index efa43986d0d..bd0555b9364 100644
--- a/src/mongo/db/commands/kill_op.cpp
+++ b/src/mongo/db/commands/kill_op.cpp
@@ -45,46 +45,49 @@
namespace mongo {
- class KillOpCommand : public Command {
- public:
-
- KillOpCommand() : Command("killOp") {}
-
- bool isWriteCommandForConfigServer() const final { return false; }
-
- bool slaveOk() const final { return true; }
-
- bool adminOnly() const final { return true; }
-
- Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) final {
-
- bool isAuthorized = AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forClusterResource(),
- ActionType::killop);
- return isAuthorized ? Status::OK() : Status(ErrorCodes::Unauthorized, "Unauthorized");
- }
-
- bool run(OperationContext* txn,
- const std::string& db,
- BSONObj& cmdObj,
- int options,
- std::string& errmsg,
- BSONObjBuilder& result) final {
-
- long long op;
- uassertStatusOK(bsonExtractIntegerField(cmdObj, "op", &op));
-
- log() << "going to kill op: " << op;
- result.append("info", "attempting to kill op");
-
- uassert(26823, str::stream() << "invalid op : " << op,
- (op >= 0) && (op <= std::numeric_limits<unsigned int>::max()));
-
- getGlobalServiceContext()->killOperation(static_cast<unsigned int>(op));
- return true;
- }
- } killOpCmd;
+class KillOpCommand : public Command {
+public:
+ KillOpCommand() : Command("killOp") {}
+
+ bool isWriteCommandForConfigServer() const final {
+ return false;
+ }
+
+ bool slaveOk() const final {
+ return true;
+ }
+
+ bool adminOnly() const final {
+ return true;
+ }
+
+ Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) final {
+ bool isAuthorized = AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
+ ResourcePattern::forClusterResource(), ActionType::killop);
+ return isAuthorized ? Status::OK() : Status(ErrorCodes::Unauthorized, "Unauthorized");
+ }
+
+ bool run(OperationContext* txn,
+ const std::string& db,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result) final {
+ long long op;
+ uassertStatusOK(bsonExtractIntegerField(cmdObj, "op", &op));
+
+ log() << "going to kill op: " << op;
+ result.append("info", "attempting to kill op");
+
+ uassert(26823,
+ str::stream() << "invalid op : " << op,
+ (op >= 0) && (op <= std::numeric_limits<unsigned int>::max()));
+
+ getGlobalServiceContext()->killOperation(static_cast<unsigned int>(op));
+ return true;
+ }
+} killOpCmd;
} // namespace mongo
diff --git a/src/mongo/db/commands/list_collections.cpp b/src/mongo/db/commands/list_collections.cpp
index 9c4789e3cd3..b48e98598ac 100644
--- a/src/mongo/db/commands/list_collections.cpp
+++ b/src/mongo/db/commands/list_collections.cpp
@@ -48,165 +48,165 @@
namespace mongo {
- using std::unique_ptr;
- using std::list;
- using std::string;
- using std::stringstream;
-
- class CmdListCollections : public Command {
- public:
- virtual bool slaveOk() const { return false; }
- virtual bool slaveOverrideOk() const { return true; }
- virtual bool adminOnly() const { return false; }
- virtual bool isWriteCommandForConfigServer() const { return false; }
-
- virtual void help( stringstream& help ) const { help << "list collections for this db"; }
-
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- AuthorizationSession* authzSession = AuthorizationSession::get(client);
-
- // Check for the listCollections ActionType on the database
- // or find on system.namespaces for pre 3.0 systems.
- if (authzSession->isAuthorizedForActionsOnResource(
- ResourcePattern::forDatabaseName(dbname),
- ActionType::listCollections) ||
- authzSession->isAuthorizedForActionsOnResource(
- ResourcePattern::forExactNamespace(
- NamespaceString(dbname, "system.namespaces")),
- ActionType::find)) {
- return Status::OK();
+using std::unique_ptr;
+using std::list;
+using std::string;
+using std::stringstream;
+
+class CmdListCollections : public Command {
+public:
+ virtual bool slaveOk() const {
+ return false;
+ }
+ virtual bool slaveOverrideOk() const {
+ return true;
+ }
+ virtual bool adminOnly() const {
+ return false;
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+
+ virtual void help(stringstream& help) const {
+ help << "list collections for this db";
+ }
+
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ AuthorizationSession* authzSession = AuthorizationSession::get(client);
+
+ // Check for the listCollections ActionType on the database
+ // or find on system.namespaces for pre 3.0 systems.
+ if (authzSession->isAuthorizedForActionsOnResource(ResourcePattern::forDatabaseName(dbname),
+ ActionType::listCollections) ||
+ authzSession->isAuthorizedForActionsOnResource(
+ ResourcePattern::forExactNamespace(NamespaceString(dbname, "system.namespaces")),
+ ActionType::find)) {
+ return Status::OK();
+ }
+
+        return Status(ErrorCodes::Unauthorized,
+                      str::stream() << "Not authorized to list collections on db: " << dbname);
+ }
+
+ CmdListCollections() : Command("listCollections") {}
+
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& jsobj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ std::unique_ptr<MatchExpression> matcher;
+ BSONElement filterElt = jsobj["filter"];
+ if (!filterElt.eoo()) {
+ if (filterElt.type() != mongo::Object) {
+ return appendCommandStatus(
+ result, Status(ErrorCodes::BadValue, "\"filter\" must be an object"));
+ }
+ StatusWithMatchExpression statusWithMatcher =
+ MatchExpressionParser::parse(filterElt.Obj());
+ if (!statusWithMatcher.isOK()) {
+ return appendCommandStatus(result, statusWithMatcher.getStatus());
}
+ matcher.reset(statusWithMatcher.getValue());
+ }
- return Status(ErrorCodes::Unauthorized,
- str::stream() << "Not authorized to create users on db: " <<
- dbname);
+ const long long defaultBatchSize = std::numeric_limits<long long>::max();
+ long long batchSize;
+ Status parseCursorStatus = parseCommandCursorOptions(jsobj, defaultBatchSize, &batchSize);
+ if (!parseCursorStatus.isOK()) {
+ return appendCommandStatus(result, parseCursorStatus);
}
- CmdListCollections() : Command( "listCollections" ) {}
-
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& jsobj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- std::unique_ptr<MatchExpression> matcher;
- BSONElement filterElt = jsobj["filter"];
- if (!filterElt.eoo()) {
- if (filterElt.type() != mongo::Object) {
- return appendCommandStatus(result, Status(ErrorCodes::BadValue,
- "\"filter\" must be an object"));
- }
- StatusWithMatchExpression statusWithMatcher =
- MatchExpressionParser::parse(filterElt.Obj());
- if (!statusWithMatcher.isOK()) {
- return appendCommandStatus(result, statusWithMatcher.getStatus());
- }
- matcher.reset(statusWithMatcher.getValue());
- }
+ ScopedTransaction scopedXact(txn, MODE_IS);
+ AutoGetDb autoDb(txn, dbname, MODE_S);
- const long long defaultBatchSize = std::numeric_limits<long long>::max();
- long long batchSize;
- Status parseCursorStatus = parseCommandCursorOptions(jsobj,
- defaultBatchSize,
- &batchSize);
- if (!parseCursorStatus.isOK()) {
- return appendCommandStatus(result, parseCursorStatus);
- }
+ const Database* d = autoDb.getDb();
+ const DatabaseCatalogEntry* dbEntry = NULL;
- ScopedTransaction scopedXact(txn, MODE_IS);
- AutoGetDb autoDb(txn, dbname, MODE_S);
+ list<string> names;
+ if (d) {
+ dbEntry = d->getDatabaseCatalogEntry();
+ dbEntry->getCollectionNamespaces(&names);
+ names.sort();
+ }
- const Database* d = autoDb.getDb();
- const DatabaseCatalogEntry* dbEntry = NULL;
+ std::unique_ptr<WorkingSet> ws(new WorkingSet());
+ std::unique_ptr<QueuedDataStage> root(new QueuedDataStage(ws.get()));
- list<string> names;
- if ( d ) {
- dbEntry = d->getDatabaseCatalogEntry();
- dbEntry->getCollectionNamespaces( &names );
- names.sort();
- }
+ for (std::list<std::string>::const_iterator i = names.begin(); i != names.end(); ++i) {
+ const std::string& ns = *i;
- std::unique_ptr<WorkingSet> ws(new WorkingSet());
- std::unique_ptr<QueuedDataStage> root(new QueuedDataStage(ws.get()));
-
- for (std::list<std::string>::const_iterator i = names.begin();
- i != names.end();
- ++i) {
- const std::string& ns = *i;
-
- StringData collection = nsToCollectionSubstring( ns );
- if ( collection == "system.namespaces" ) {
- continue;
- }
-
- BSONObjBuilder b;
- b.append( "name", collection );
-
- CollectionOptions options =
- dbEntry->getCollectionCatalogEntry( ns )->getCollectionOptions(txn);
- b.append( "options", options.toBSON() );
-
- BSONObj maybe = b.obj();
- if ( matcher && !matcher->matchesBSON( maybe ) ) {
- continue;
- }
-
- WorkingSetMember member;
- member.state = WorkingSetMember::OWNED_OBJ;
- member.keyData.clear();
- member.loc = RecordId();
- member.obj = Snapshotted<BSONObj>(SnapshotId(), maybe);
- root->pushBack(member);
+ StringData collection = nsToCollectionSubstring(ns);
+ if (collection == "system.namespaces") {
+ continue;
}
- std::string cursorNamespace = str::stream() << dbname << ".$cmd." << name;
- dassert(NamespaceString(cursorNamespace).isValid());
- dassert(NamespaceString(cursorNamespace).isListCollectionsGetMore());
-
- PlanExecutor* rawExec;
- Status makeStatus = PlanExecutor::make(txn,
- ws.release(),
- root.release(),
- cursorNamespace,
- PlanExecutor::YIELD_MANUAL,
- &rawExec);
- std::unique_ptr<PlanExecutor> exec(rawExec);
- if (!makeStatus.isOK()) {
- return appendCommandStatus( result, makeStatus );
- }
+ BSONObjBuilder b;
+ b.append("name", collection);
- BSONArrayBuilder firstBatch;
-
- const int byteLimit = MaxBytesToReturnToClientAtOnce;
- for (long long objCount = 0;
- objCount < batchSize && firstBatch.len() < byteLimit;
- objCount++) {
- BSONObj next;
- PlanExecutor::ExecState state = exec->getNext(&next, NULL);
- if ( state == PlanExecutor::IS_EOF ) {
- break;
- }
- invariant( state == PlanExecutor::ADVANCED );
- firstBatch.append(next);
- }
+ CollectionOptions options =
+ dbEntry->getCollectionCatalogEntry(ns)->getCollectionOptions(txn);
+ b.append("options", options.toBSON());
- CursorId cursorId = 0LL;
- if ( !exec->isEOF() ) {
- exec->saveState();
- ClientCursor* cursor = new ClientCursor(CursorManager::getGlobalCursorManager(),
- exec.release(),
- cursorNamespace);
- cursorId = cursor->cursorid();
+ BSONObj maybe = b.obj();
+ if (matcher && !matcher->matchesBSON(maybe)) {
+ continue;
}
- appendCursorResponseObject( cursorId, cursorNamespace, firstBatch.arr(), &result );
+ WorkingSetMember member;
+ member.state = WorkingSetMember::OWNED_OBJ;
+ member.keyData.clear();
+ member.loc = RecordId();
+ member.obj = Snapshotted<BSONObj>(SnapshotId(), maybe);
+ root->pushBack(member);
+ }
- return true;
+ std::string cursorNamespace = str::stream() << dbname << ".$cmd." << name;
+ dassert(NamespaceString(cursorNamespace).isValid());
+ dassert(NamespaceString(cursorNamespace).isListCollectionsGetMore());
+
+ PlanExecutor* rawExec;
+ Status makeStatus = PlanExecutor::make(txn,
+ ws.release(),
+ root.release(),
+ cursorNamespace,
+ PlanExecutor::YIELD_MANUAL,
+ &rawExec);
+ std::unique_ptr<PlanExecutor> exec(rawExec);
+ if (!makeStatus.isOK()) {
+ return appendCommandStatus(result, makeStatus);
}
- } cmdListCollections;
+ BSONArrayBuilder firstBatch;
+
+ const int byteLimit = MaxBytesToReturnToClientAtOnce;
+ for (long long objCount = 0; objCount < batchSize && firstBatch.len() < byteLimit;
+ objCount++) {
+ BSONObj next;
+ PlanExecutor::ExecState state = exec->getNext(&next, NULL);
+ if (state == PlanExecutor::IS_EOF) {
+ break;
+ }
+ invariant(state == PlanExecutor::ADVANCED);
+ firstBatch.append(next);
+ }
+
+ CursorId cursorId = 0LL;
+ if (!exec->isEOF()) {
+ exec->saveState();
+ ClientCursor* cursor = new ClientCursor(
+ CursorManager::getGlobalCursorManager(), exec.release(), cursorNamespace);
+ cursorId = cursor->cursorid();
+ }
+
+ appendCursorResponseObject(cursorId, cursorNamespace, firstBatch.arr(), &result);
+
+ return true;
+ }
+} cmdListCollections;
}
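
A minimal sketch, not part of the commit, of how the reformatted "filter" handling above behaves: listCollections builds one {name, options} document per collection and drops any document the parsed MatchExpression rejects. Only calls that appear in the diff are used; error handling is elided and the names are invented.

    BSONObj filter = BSON("name"
                          << "mycoll");
    StatusWithMatchExpression parsed = MatchExpressionParser::parse(filter);
    invariant(parsed.isOK());
    std::unique_ptr<MatchExpression> matcher(parsed.getValue());

    BSONObj candidate = BSON("name"
                             << "mycoll"
                             << "options" << BSONObj());
    if (matcher->matchesBSON(candidate)) {
        // this collection would be pushed into the QueuedDataStage as an
        // OWNED_OBJ working-set member and eventually returned in firstBatch
    }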
diff --git a/src/mongo/db/commands/list_databases.cpp b/src/mongo/db/commands/list_databases.cpp
index 81779a83a76..886c035d076 100644
--- a/src/mongo/db/commands/list_databases.cpp
+++ b/src/mongo/db/commands/list_databases.cpp
@@ -38,85 +38,87 @@
namespace mongo {
- using std::set;
- using std::string;
- using std::stringstream;
- using std::vector;
-
- // XXX: remove and put into storage api
- intmax_t dbSize( const string& database );
-
- class CmdListDatabases : public Command {
- public:
- virtual bool slaveOk() const {
- return false;
- }
- virtual bool slaveOverrideOk() const {
- return true;
- }
- virtual bool adminOnly() const {
- return true;
- }
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual void help( stringstream& help ) const { help << "list databases on this server"; }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::listDatabases);
- out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
- }
-
- CmdListDatabases() : Command("listDatabases" , true ) {}
-
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& jsobj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
-
- vector< string > dbNames;
- StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
- storageEngine->listDatabases( &dbNames );
-
- vector< BSONObj > dbInfos;
-
- set<string> seen;
- intmax_t totalSize = 0;
- for ( vector< string >::iterator i = dbNames.begin(); i != dbNames.end(); ++i ) {
- const string& dbname = *i;
-
- BSONObjBuilder b;
- b.append( "name", dbname );
-
- {
- ScopedTransaction transaction(txn, MODE_IS);
- Lock::DBLock dbLock(txn->lockState(), dbname, MODE_IS);
-
- Database* db = dbHolder().get( txn, dbname );
- if ( !db )
- continue;
-
- const DatabaseCatalogEntry* entry = db->getDatabaseCatalogEntry();
- invariant( entry );
-
- int64_t size = entry->sizeOnDisk( txn );
- b.append( "sizeOnDisk", static_cast<double>( size ) );
- totalSize += size;
-
- b.appendBool("empty", size == 0);
- }
-
- dbInfos.push_back( b.obj() );
-
- seen.insert( i->c_str() );
+using std::set;
+using std::string;
+using std::stringstream;
+using std::vector;
+
+// XXX: remove and put into storage api
+intmax_t dbSize(const string& database);
+
+class CmdListDatabases : public Command {
+public:
+ virtual bool slaveOk() const {
+ return false;
+ }
+ virtual bool slaveOverrideOk() const {
+ return true;
+ }
+ virtual bool adminOnly() const {
+ return true;
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual void help(stringstream& help) const {
+ help << "list databases on this server";
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::listDatabases);
+ out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
+ }
+
+ CmdListDatabases() : Command("listDatabases", true) {}
+
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& jsobj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ vector<string> dbNames;
+ StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
+ storageEngine->listDatabases(&dbNames);
+
+ vector<BSONObj> dbInfos;
+
+ set<string> seen;
+ intmax_t totalSize = 0;
+ for (vector<string>::iterator i = dbNames.begin(); i != dbNames.end(); ++i) {
+ const string& dbname = *i;
+
+ BSONObjBuilder b;
+ b.append("name", dbname);
+
+ {
+ ScopedTransaction transaction(txn, MODE_IS);
+ Lock::DBLock dbLock(txn->lockState(), dbname, MODE_IS);
+
+ Database* db = dbHolder().get(txn, dbname);
+ if (!db)
+ continue;
+
+ const DatabaseCatalogEntry* entry = db->getDatabaseCatalogEntry();
+ invariant(entry);
+
+ int64_t size = entry->sizeOnDisk(txn);
+ b.append("sizeOnDisk", static_cast<double>(size));
+ totalSize += size;
+
+ b.appendBool("empty", size == 0);
}
- result.append( "databases", dbInfos );
- result.append( "totalSize", double( totalSize ) );
- return true;
+ dbInfos.push_back(b.obj());
+
+ seen.insert(i->c_str());
}
- } cmdListDatabases;
+ result.append("databases", dbInfos);
+ result.append("totalSize", double(totalSize));
+ return true;
+ }
+} cmdListDatabases;
}
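
For reference, a hedged sketch of the shape of one entry in the "databases" reply array built above; the field names come from the diff, the values are invented.

    BSONObjBuilder b;
    b.append("name", "test");
    b.append("sizeOnDisk", static_cast<double>(83886080));
    b.appendBool("empty", false);
    BSONObj dbInfo = b.obj();  // { name: "test", sizeOnDisk: 83886080.0, empty: false }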
diff --git a/src/mongo/db/commands/list_indexes.cpp b/src/mongo/db/commands/list_indexes.cpp
index f020f87c43d..ce0394156e3 100644
--- a/src/mongo/db/commands/list_indexes.cpp
+++ b/src/mongo/db/commands/list_indexes.cpp
@@ -48,159 +48,163 @@
namespace mongo {
- using std::string;
- using std::stringstream;
- using std::vector;
-
- /**
- * Lists the indexes for a given collection.
- *
- * Format:
- * {
- * listIndexes: <collection name>
- * }
- *
- * Return format:
- * {
- * indexes: [
- * ...
- * ]
- * }
- */
- class CmdListIndexes : public Command {
- public:
- virtual bool slaveOk() const { return false; }
- virtual bool slaveOverrideOk() const { return true; }
- virtual bool adminOnly() const { return false; }
- virtual bool isWriteCommandForConfigServer() const { return false; }
-
- virtual void help( stringstream& help ) const { help << "list indexes for a collection"; }
-
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::listIndexes);
- out->push_back(Privilege(parseResourcePattern( dbname, cmdObj ), actions));
- }
-
- CmdListIndexes() : Command( "listIndexes" ) {}
-
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
+using std::string;
+using std::stringstream;
+using std::vector;
- BSONElement first = cmdObj.firstElement();
- uassert(
- 28528,
+/**
+ * Lists the indexes for a given collection.
+ *
+ * Format:
+ * {
+ * listIndexes: <collection name>
+ * }
+ *
+ * Return format:
+ * {
+ * indexes: [
+ * ...
+ * ]
+ * }
+ */
+class CmdListIndexes : public Command {
+public:
+ virtual bool slaveOk() const {
+ return false;
+ }
+ virtual bool slaveOverrideOk() const {
+ return true;
+ }
+ virtual bool adminOnly() const {
+ return false;
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+
+ virtual void help(stringstream& help) const {
+ help << "list indexes for a collection";
+ }
+
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::listIndexes);
+ out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
+ }
+
+ CmdListIndexes() : Command("listIndexes") {}
+
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ BSONElement first = cmdObj.firstElement();
+ uassert(28528,
str::stream() << "Argument to listIndexes must be of type String, not "
<< typeName(first.type()),
first.type() == String);
- StringData collectionName = first.valueStringData();
- uassert(
- 28529,
+ StringData collectionName = first.valueStringData();
+ uassert(28529,
str::stream() << "Argument to listIndexes must be a collection name, "
<< "not the empty string",
!collectionName.empty());
- const NamespaceString ns(dbname, collectionName);
-
- const long long defaultBatchSize = std::numeric_limits<long long>::max();
- long long batchSize;
- Status parseCursorStatus = parseCommandCursorOptions(cmdObj,
- defaultBatchSize,
- &batchSize);
- if (!parseCursorStatus.isOK()) {
- return appendCommandStatus(result, parseCursorStatus);
- }
+ const NamespaceString ns(dbname, collectionName);
- AutoGetCollectionForRead autoColl(txn, ns);
- if (!autoColl.getDb()) {
- return appendCommandStatus( result, Status( ErrorCodes::NamespaceNotFound,
- "no database" ) );
- }
+ const long long defaultBatchSize = std::numeric_limits<long long>::max();
+ long long batchSize;
+ Status parseCursorStatus = parseCommandCursorOptions(cmdObj, defaultBatchSize, &batchSize);
+ if (!parseCursorStatus.isOK()) {
+ return appendCommandStatus(result, parseCursorStatus);
+ }
- const Collection* collection = autoColl.getCollection();
- if (!collection) {
- return appendCommandStatus( result, Status( ErrorCodes::NamespaceNotFound,
- "no collection" ) );
- }
+ AutoGetCollectionForRead autoColl(txn, ns);
+ if (!autoColl.getDb()) {
+ return appendCommandStatus(result,
+ Status(ErrorCodes::NamespaceNotFound, "no database"));
+ }
+
+ const Collection* collection = autoColl.getCollection();
+ if (!collection) {
+ return appendCommandStatus(result,
+ Status(ErrorCodes::NamespaceNotFound, "no collection"));
+ }
- const CollectionCatalogEntry* cce = collection->getCatalogEntry();
- invariant(cce);
+ const CollectionCatalogEntry* cce = collection->getCatalogEntry();
+ invariant(cce);
- vector<string> indexNames;
+ vector<string> indexNames;
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ indexNames.clear();
+ cce->getAllIndexes(txn, &indexNames);
+ }
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "listIndexes", ns.ns());
+
+ std::unique_ptr<WorkingSet> ws(new WorkingSet());
+ std::unique_ptr<QueuedDataStage> root(new QueuedDataStage(ws.get()));
+
+ for (size_t i = 0; i < indexNames.size(); i++) {
+ BSONObj indexSpec;
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- indexNames.clear();
- cce->getAllIndexes( txn, &indexNames );
- } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "listIndexes", ns.ns());
-
- std::unique_ptr<WorkingSet> ws(new WorkingSet());
- std::unique_ptr<QueuedDataStage> root(new QueuedDataStage(ws.get()));
-
- for ( size_t i = 0; i < indexNames.size(); i++ ) {
- BSONObj indexSpec;
- MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- indexSpec = cce->getIndexSpec( txn, indexNames[i] );
- } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "listIndexes", ns.ns());
-
- WorkingSetMember member;
- member.state = WorkingSetMember::OWNED_OBJ;
- member.keyData.clear();
- member.loc = RecordId();
- member.obj = Snapshotted<BSONObj>(SnapshotId(), indexSpec.getOwned());
- root->pushBack(member);
+ indexSpec = cce->getIndexSpec(txn, indexNames[i]);
}
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "listIndexes", ns.ns());
+
+ WorkingSetMember member;
+ member.state = WorkingSetMember::OWNED_OBJ;
+ member.keyData.clear();
+ member.loc = RecordId();
+ member.obj = Snapshotted<BSONObj>(SnapshotId(), indexSpec.getOwned());
+ root->pushBack(member);
+ }
- std::string cursorNamespace = str::stream() << dbname << ".$cmd." << name << "."
- << ns.coll();
- dassert(NamespaceString(cursorNamespace).isValid());
- dassert(NamespaceString(cursorNamespace).isListIndexesGetMore());
- dassert(ns == NamespaceString(cursorNamespace).getTargetNSForListIndexesGetMore());
-
- PlanExecutor* rawExec;
- Status makeStatus = PlanExecutor::make(txn,
- ws.release(),
- root.release(),
- cursorNamespace,
- PlanExecutor::YIELD_MANUAL,
- &rawExec);
- std::unique_ptr<PlanExecutor> exec(rawExec);
- if (!makeStatus.isOK()) {
- return appendCommandStatus( result, makeStatus );
- }
+ std::string cursorNamespace = str::stream() << dbname << ".$cmd." << name << "."
+ << ns.coll();
+ dassert(NamespaceString(cursorNamespace).isValid());
+ dassert(NamespaceString(cursorNamespace).isListIndexesGetMore());
+ dassert(ns == NamespaceString(cursorNamespace).getTargetNSForListIndexesGetMore());
+
+ PlanExecutor* rawExec;
+ Status makeStatus = PlanExecutor::make(txn,
+ ws.release(),
+ root.release(),
+ cursorNamespace,
+ PlanExecutor::YIELD_MANUAL,
+ &rawExec);
+ std::unique_ptr<PlanExecutor> exec(rawExec);
+ if (!makeStatus.isOK()) {
+ return appendCommandStatus(result, makeStatus);
+ }
- BSONArrayBuilder firstBatch;
-
- const int byteLimit = MaxBytesToReturnToClientAtOnce;
- for (long long objCount = 0;
- objCount < batchSize && firstBatch.len() < byteLimit;
- objCount++) {
- BSONObj next;
- PlanExecutor::ExecState state = exec->getNext(&next, NULL);
- if ( state == PlanExecutor::IS_EOF ) {
- break;
- }
- invariant( state == PlanExecutor::ADVANCED );
- firstBatch.append(next);
- }
+ BSONArrayBuilder firstBatch;
- CursorId cursorId = 0LL;
- if ( !exec->isEOF() ) {
- exec->saveState();
- ClientCursor* cursor = new ClientCursor(CursorManager::getGlobalCursorManager(),
- exec.release(),
- cursorNamespace);
- cursorId = cursor->cursorid();
+ const int byteLimit = MaxBytesToReturnToClientAtOnce;
+ for (long long objCount = 0; objCount < batchSize && firstBatch.len() < byteLimit;
+ objCount++) {
+ BSONObj next;
+ PlanExecutor::ExecState state = exec->getNext(&next, NULL);
+ if (state == PlanExecutor::IS_EOF) {
+ break;
}
+ invariant(state == PlanExecutor::ADVANCED);
+ firstBatch.append(next);
+ }
- appendCursorResponseObject( cursorId, cursorNamespace, firstBatch.arr(), &result );
-
- return true;
+ CursorId cursorId = 0LL;
+ if (!exec->isEOF()) {
+ exec->saveState();
+ ClientCursor* cursor = new ClientCursor(
+ CursorManager::getGlobalCursorManager(), exec.release(), cursorNamespace);
+ cursorId = cursor->cursorid();
}
- } cmdListIndexes;
+ appendCursorResponseObject(cursorId, cursorNamespace, firstBatch.arr(), &result);
+
+ return true;
+ }
+} cmdListIndexes;
}
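
The MONGO_WRITE_CONFLICT_RETRY_LOOP macros reformatted above are worth a note: the body re-runs whenever a write conflict is detected, so any state the body accumulates must be reset on entry. A minimal sketch of the idiom, mirroring the listIndexes code:

    vector<string> indexNames;
    MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
        // reset accumulated state first; a retried iteration would
        // otherwise append the same index names twice
        indexNames.clear();
        cce->getAllIndexes(txn, &indexNames);
    }
    MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "listIndexes", ns.ns());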
diff --git a/src/mongo/db/commands/merge_chunks_cmd.cpp b/src/mongo/db/commands/merge_chunks_cmd.cpp
index 87721b11469..1ee9d397dd7 100644
--- a/src/mongo/db/commands/merge_chunks_cmd.cpp
+++ b/src/mongo/db/commands/merge_chunks_cmd.cpp
@@ -38,145 +38,151 @@
namespace mongo {
- using std::string;
- using std::stringstream;
- using std::vector;
-
- /**
- * Mongod-side command for merging chunks.
- */
- class MergeChunksCommand : public Command {
- public:
- MergeChunksCommand() : Command("mergeChunks") {}
-
- virtual void help(stringstream& h) const {
- h << "Merge Chunks command\n"
- << "usage: { mergeChunks : <ns>, bounds : [ <min key>, <max key> ],"
- << " (opt) epoch : <epoch>, (opt) config : <configdb string>,"
- << " (opt) shardName : <shard name> }";
- }
+using std::string;
+using std::stringstream;
+using std::vector;
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forExactNamespace(NamespaceString(parseNs(dbname, cmdObj))),
- ActionType::splitChunk)) {
- return Status(ErrorCodes::Unauthorized, "Unauthorized");
- }
- return Status::OK();
- }
+/**
+ * Mongod-side command for merging chunks.
+ */
+class MergeChunksCommand : public Command {
+public:
+ MergeChunksCommand() : Command("mergeChunks") {}
+
+ virtual void help(stringstream& h) const {
+ h << "Merge Chunks command\n"
+ << "usage: { mergeChunks : <ns>, bounds : [ <min key>, <max key> ],"
+ << " (opt) epoch : <epoch>, (opt) config : <configdb string>,"
+ << " (opt) shardName : <shard name> }";
+ }
- virtual std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const {
- return parseNsFullyQualified(dbname, cmdObj);
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
+ ResourcePattern::forExactNamespace(NamespaceString(parseNs(dbname, cmdObj))),
+ ActionType::splitChunk)) {
+ return Status(ErrorCodes::Unauthorized, "Unauthorized");
}
+ return Status::OK();
+ }
- virtual bool adminOnly() const { return true; }
- virtual bool slaveOk() const { return false; }
- virtual bool isWriteCommandForConfigServer() const { return false; }
-
- // Required
- static BSONField<string> nsField;
- static BSONField<vector<BSONObj> > boundsField;
- // Optional, if the merge is only valid for a particular epoch
- static BSONField<OID> epochField;
-        // Optional, if our sharding state has not previously been initialized
- static BSONField<string> shardNameField;
- static BSONField<string> configField;
-
- bool run(OperationContext* txn, const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
-
- string ns = parseNs(dbname, cmdObj);
-
- if ( ns.size() == 0 ) {
- errmsg = "no namespace specified";
- return false;
- }
+ virtual std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const {
+ return parseNsFullyQualified(dbname, cmdObj);
+ }
- vector<BSONObj> bounds;
- if ( !FieldParser::extract( cmdObj, boundsField, &bounds, &errmsg ) ) {
- return false;
- }
+ virtual bool adminOnly() const {
+ return true;
+ }
+ virtual bool slaveOk() const {
+ return false;
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
- if ( bounds.size() == 0 ) {
- errmsg = "no bounds were specified";
- return false;
- }
+ // Required
+ static BSONField<string> nsField;
+ static BSONField<vector<BSONObj>> boundsField;
+ // Optional, if the merge is only valid for a particular epoch
+ static BSONField<OID> epochField;
+    // Optional, if our sharding state has not previously been initialized
+ static BSONField<string> shardNameField;
+ static BSONField<string> configField;
+
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ string ns = parseNs(dbname, cmdObj);
+
+ if (ns.size() == 0) {
+ errmsg = "no namespace specified";
+ return false;
+ }
- if ( bounds.size() != 2 ) {
- errmsg = "only a min and max bound may be specified";
- return false;
- }
+ vector<BSONObj> bounds;
+ if (!FieldParser::extract(cmdObj, boundsField, &bounds, &errmsg)) {
+ return false;
+ }
- BSONObj minKey = bounds[0];
- BSONObj maxKey = bounds[1];
+ if (bounds.size() == 0) {
+ errmsg = "no bounds were specified";
+ return false;
+ }
- if ( minKey.isEmpty() ) {
- errmsg = "no min key specified";
- return false;
- }
+ if (bounds.size() != 2) {
+ errmsg = "only a min and max bound may be specified";
+ return false;
+ }
- if ( maxKey.isEmpty() ) {
- errmsg = "no max key specified";
- return false;
- }
+ BSONObj minKey = bounds[0];
+ BSONObj maxKey = bounds[1];
- //
- // This might be the first call from mongos, so we may need to pass the config and shard
- // information to initialize the shardingState.
- //
-
- string config;
- FieldParser::FieldState extracted = FieldParser::extract( cmdObj,
- configField,
- &config,
- &errmsg );
- if (!shardingState.enabled()) {
- if (!extracted || extracted == FieldParser::FIELD_NONE) {
- errmsg = "sharding state must be enabled or "
- "config server specified to merge chunks";
- return false;
- }
-
- ShardingState::initialize(config);
- }
+ if (minKey.isEmpty()) {
+ errmsg = "no min key specified";
+ return false;
+ }
- // ShardName is optional, but might not be set yet
- string shardName;
- extracted = FieldParser::extract( cmdObj, shardNameField, &shardName, &errmsg );
+ if (maxKey.isEmpty()) {
+ errmsg = "no max key specified";
+ return false;
+ }
- if ( !extracted ) return false;
- if ( extracted != FieldParser::FIELD_NONE ) {
- shardingState.gotShardName( shardName );
+ //
+ // This might be the first call from mongos, so we may need to pass the config and shard
+ // information to initialize the shardingState.
+ //
+
+ string config;
+ FieldParser::FieldState extracted =
+ FieldParser::extract(cmdObj, configField, &config, &errmsg);
+ if (!shardingState.enabled()) {
+ if (!extracted || extracted == FieldParser::FIELD_NONE) {
+ errmsg =
+ "sharding state must be enabled or "
+ "config server specified to merge chunks";
+ return false;
}
- //
- // Epoch is optional, and if not set indicates we should use the latest epoch
- //
+ ShardingState::initialize(config);
+ }
- OID epoch;
- if ( !FieldParser::extract( cmdObj, epochField, &epoch, &errmsg ) ) {
- return false;
- }
+ // ShardName is optional, but might not be set yet
+ string shardName;
+ extracted = FieldParser::extract(cmdObj, shardNameField, &shardName, &errmsg);
- return mergeChunks( txn, NamespaceString( ns ), minKey, maxKey, epoch, &errmsg );
+ if (!extracted)
+ return false;
+ if (extracted != FieldParser::FIELD_NONE) {
+ shardingState.gotShardName(shardName);
}
- };
- BSONField<string> MergeChunksCommand::nsField( "mergeChunks" );
- BSONField<vector<BSONObj> > MergeChunksCommand::boundsField( "bounds" );
+ //
+ // Epoch is optional, and if not set indicates we should use the latest epoch
+ //
- BSONField<string> MergeChunksCommand::configField( "config" );
- BSONField<string> MergeChunksCommand::shardNameField( "shardName" );
- BSONField<OID> MergeChunksCommand::epochField( "epoch" );
+ OID epoch;
+ if (!FieldParser::extract(cmdObj, epochField, &epoch, &errmsg)) {
+ return false;
+ }
- MONGO_INITIALIZER(InitMergeChunksCommand)(InitializerContext* context) {
- // Leaked intentionally: a Command registers itself when constructed.
- new MergeChunksCommand();
- return Status::OK();
+ return mergeChunks(txn, NamespaceString(ns), minKey, maxKey, epoch, &errmsg);
}
+};
+
+BSONField<string> MergeChunksCommand::nsField("mergeChunks");
+BSONField<vector<BSONObj>> MergeChunksCommand::boundsField("bounds");
+
+BSONField<string> MergeChunksCommand::configField("config");
+BSONField<string> MergeChunksCommand::shardNameField("shardName");
+BSONField<OID> MergeChunksCommand::epochField("epoch");
+
+MONGO_INITIALIZER(InitMergeChunksCommand)(InitializerContext* context) {
+ // Leaked intentionally: a Command registers itself when constructed.
+ new MergeChunksCommand();
+ return Status::OK();
+}
}
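
A sketch of the FieldParser pattern used throughout this command; everything here appears in the diff above, but the control flow is easier to see in isolation. extract() distinguishes a BSON type error (false) from an absent field (FIELD_NONE) and from a successfully parsed one.

    static BSONField<string> configField("config");

    string config;
    string errmsg;
    FieldParser::FieldState extracted =
        FieldParser::extract(cmdObj, configField, &config, &errmsg);
    if (!extracted) {
        // wrong BSON type: errmsg has been populated, fail the command
    } else if (extracted == FieldParser::FIELD_NONE) {
        // field absent: fall back to the existing sharding state
    }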
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index d88f05f733f..59eca8ae4c4 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -71,882 +71,848 @@
namespace mongo {
- using std::endl;
- using std::set;
- using std::shared_ptr;
- using std::string;
- using std::stringstream;
- using std::unique_ptr;
- using std::vector;
+using std::endl;
+using std::set;
+using std::shared_ptr;
+using std::string;
+using std::stringstream;
+using std::unique_ptr;
+using std::vector;
- namespace mr {
+namespace mr {
- AtomicUInt32 Config::JOB_NUMBER;
+AtomicUInt32 Config::JOB_NUMBER;
- JSFunction::JSFunction( const std::string& type , const BSONElement& e ) {
- _type = type;
- _code = e._asCode();
+JSFunction::JSFunction(const std::string& type, const BSONElement& e) {
+ _type = type;
+ _code = e._asCode();
- if ( e.type() == CodeWScope )
- _wantedScope = e.codeWScopeObject();
- }
+ if (e.type() == CodeWScope)
+ _wantedScope = e.codeWScopeObject();
+}
- void JSFunction::init( State * state ) {
- _scope = state->scope();
- verify( _scope );
- _scope->init( &_wantedScope );
+void JSFunction::init(State* state) {
+ _scope = state->scope();
+ verify(_scope);
+ _scope->init(&_wantedScope);
- _func = _scope->createFunction( _code.c_str() );
- uassert( 13598 , str::stream() << "couldn't compile code for: " << _type , _func );
+ _func = _scope->createFunction(_code.c_str());
+ uassert(13598, str::stream() << "couldn't compile code for: " << _type, _func);
- // install in JS scope so that it can be called in JS mode
- _scope->setFunction(_type.c_str(), _code.c_str());
- }
+ // install in JS scope so that it can be called in JS mode
+ _scope->setFunction(_type.c_str(), _code.c_str());
+}
- void JSMapper::init( State * state ) {
- _func.init( state );
- _params = state->config().mapParams;
- }
+void JSMapper::init(State* state) {
+ _func.init(state);
+ _params = state->config().mapParams;
+}
- /**
- * Applies the map function to an object, which should internally call emit()
- */
- void JSMapper::map( const BSONObj& o ) {
- Scope * s = _func.scope();
- verify( s );
- if (s->invoke(_func.func(), &_params, &o, 0, true))
- uasserted(9014, str::stream() << "map invoke failed: " << s->getError());
- }
+/**
+ * Applies the map function to an object, which should internally call emit()
+ */
+void JSMapper::map(const BSONObj& o) {
+ Scope* s = _func.scope();
+ verify(s);
+ if (s->invoke(_func.func(), &_params, &o, 0, true))
+ uasserted(9014, str::stream() << "map invoke failed: " << s->getError());
+}
- /**
- * Applies the finalize function to a tuple obj (key, val)
- * Returns tuple obj {_id: key, value: newval}
- */
- BSONObj JSFinalizer::finalize( const BSONObj& o ) {
- Scope * s = _func.scope();
-
- Scope::NoDBAccess no = s->disableDBAccess( "can't access db inside finalize" );
- s->invokeSafe( _func.func() , &o, 0 );
-
- // don't want to use o.objsize() to size b
- // since there are many cases where the point of finalize
- // is converting many fields to 1
- BSONObjBuilder b;
- b.append( o.firstElement() );
- s->append( b , "value" , "__returnValue" );
- return b.obj();
- }
+/**
+ * Applies the finalize function to a tuple obj (key, val)
+ * Returns tuple obj {_id: key, value: newval}
+ */
+BSONObj JSFinalizer::finalize(const BSONObj& o) {
+ Scope* s = _func.scope();
+
+ Scope::NoDBAccess no = s->disableDBAccess("can't access db inside finalize");
+ s->invokeSafe(_func.func(), &o, 0);
+
+ // don't want to use o.objsize() to size b
+ // since there are many cases where the point of finalize
+ // is converting many fields to 1
+ BSONObjBuilder b;
+ b.append(o.firstElement());
+ s->append(b, "value", "__returnValue");
+ return b.obj();
+}
- void JSReducer::init( State * state ) {
- _func.init( state );
- }
+void JSReducer::init(State* state) {
+ _func.init(state);
+}
- /**
- * Reduces a list of tuple objects (key, value) to a single tuple {"0": key, "1": value}
- */
- BSONObj JSReducer::reduce( const BSONList& tuples ) {
- if (tuples.size() <= 1)
- return tuples[0];
- BSONObj key;
- int endSizeEstimate = 16;
- _reduce( tuples , key , endSizeEstimate );
-
- BSONObjBuilder b(endSizeEstimate);
- b.appendAs( key.firstElement() , "0" );
- _func.scope()->append( b , "1" , "__returnValue" );
- return b.obj();
- }
+/**
+ * Reduces a list of tuple objects (key, value) to a single tuple {"0": key, "1": value}
+ */
+BSONObj JSReducer::reduce(const BSONList& tuples) {
+ if (tuples.size() <= 1)
+ return tuples[0];
+ BSONObj key;
+ int endSizeEstimate = 16;
+ _reduce(tuples, key, endSizeEstimate);
+
+ BSONObjBuilder b(endSizeEstimate);
+ b.appendAs(key.firstElement(), "0");
+ _func.scope()->append(b, "1", "__returnValue");
+ return b.obj();
+}
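
A standalone sketch of the {"0": key, "1": value} tuple that reduce() returns above, assuming only BSONObjBuilder::appendAs(), which copies an element under a new field name; the key and value are invented.

    BSONObj src = BSON("k"
                       << "apples"
                       << "v" << 3);
    BSONObjIterator it(src);
    BSONObjBuilder b;
    b.appendAs(it.next(), "0");  // key element, renamed to "0"
    b.appendAs(it.next(), "1");  // value element, renamed to "1"
    BSONObj tuple = b.obj();     // { "0": "apples", "1": 3 }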
- /**
-         * Reduces a list of tuple objects (key, value) to a single tuple {_id: key, value: val}
- * Also applies a finalizer method if present.
- */
- BSONObj JSReducer::finalReduce( const BSONList& tuples , Finalizer * finalizer ) {
+/**
+ * Reduces a list of tuple objects (key, value) to a single tuple {_id: key, value: val}
+ * Also applies a finalizer method if present.
+ */
+BSONObj JSReducer::finalReduce(const BSONList& tuples, Finalizer* finalizer) {
+ BSONObj res;
+ BSONObj key;
+
+ if (tuples.size() == 1) {
+ // 1 obj, just use it
+ key = tuples[0];
+ BSONObjBuilder b(key.objsize());
+ BSONObjIterator it(key);
+ b.appendAs(it.next(), "_id");
+ b.appendAs(it.next(), "value");
+ res = b.obj();
+ } else {
+ // need to reduce
+ int endSizeEstimate = 16;
+ _reduce(tuples, key, endSizeEstimate);
+ BSONObjBuilder b(endSizeEstimate);
+ b.appendAs(key.firstElement(), "_id");
+ _func.scope()->append(b, "value", "__returnValue");
+ res = b.obj();
+ }
- BSONObj res;
- BSONObj key;
-
- if (tuples.size() == 1) {
- // 1 obj, just use it
- key = tuples[0];
- BSONObjBuilder b(key.objsize());
- BSONObjIterator it(key);
- b.appendAs( it.next() , "_id" );
- b.appendAs( it.next() , "value" );
- res = b.obj();
- }
- else {
- // need to reduce
- int endSizeEstimate = 16;
- _reduce( tuples , key , endSizeEstimate );
- BSONObjBuilder b(endSizeEstimate);
- b.appendAs( key.firstElement() , "_id" );
- _func.scope()->append( b , "value" , "__returnValue" );
- res = b.obj();
- }
+ if (finalizer) {
+ res = finalizer->finalize(res);
+ }
- if ( finalizer ) {
- res = finalizer->finalize( res );
- }
+ return res;
+}
- return res;
+/**
+ * actually applies a reduce, to a list of tuples (key, value).
+ * actually applies a reduce to a list of tuples (key, value).
+ */
+void JSReducer::_reduce(const BSONList& tuples, BSONObj& key, int& endSizeEstimate) {
+ uassert(10074, "need values", tuples.size());
+
+ int sizeEstimate = (tuples.size() * tuples.begin()->getField("value").size()) + 128;
+
+ // need to build the reduce args: ( key, [values] )
+ BSONObjBuilder reduceArgs(sizeEstimate);
+ std::unique_ptr<BSONArrayBuilder> valueBuilder;
+ unsigned n = 0;
+ for (; n < tuples.size(); n++) {
+ BSONObjIterator j(tuples[n]);
+ BSONElement keyE = j.next();
+ if (n == 0) {
+ reduceArgs.append(keyE);
+ key = keyE.wrap();
+ valueBuilder.reset(new BSONArrayBuilder(reduceArgs.subarrayStart("tuples")));
}
- /**
- * actually applies a reduce, to a list of tuples (key, value).
-         * actually applies a reduce to a list of tuples (key, value).
- */
- void JSReducer::_reduce( const BSONList& tuples , BSONObj& key , int& endSizeEstimate ) {
- uassert( 10074 , "need values" , tuples.size() );
-
- int sizeEstimate = ( tuples.size() * tuples.begin()->getField( "value" ).size() ) + 128;
-
- // need to build the reduce args: ( key, [values] )
- BSONObjBuilder reduceArgs( sizeEstimate );
- std::unique_ptr<BSONArrayBuilder> valueBuilder;
- unsigned n = 0;
- for ( ; n<tuples.size(); n++ ) {
- BSONObjIterator j(tuples[n]);
- BSONElement keyE = j.next();
- if ( n == 0 ) {
- reduceArgs.append( keyE );
- key = keyE.wrap();
- valueBuilder.reset(new BSONArrayBuilder( reduceArgs.subarrayStart( "tuples" ) ));
- }
-
- BSONElement ee = j.next();
-
- uassert( 13070 , "value too large to reduce" , ee.size() < ( BSONObjMaxUserSize / 2 ) );
+ BSONElement ee = j.next();
- // If adding this element to the array would cause it to be too large, break. The
- // remainder of the tuples will be processed recursively at the end of this
- // function.
- if ( valueBuilder->len() + ee.size() > BSONObjMaxUserSize ) {
- verify( n > 1 ); // if not, inf. loop
- break;
- }
+ uassert(13070, "value too large to reduce", ee.size() < (BSONObjMaxUserSize / 2));
- valueBuilder->append( ee );
- }
- verify(valueBuilder);
- valueBuilder->done();
- BSONObj args = reduceArgs.obj();
+ // If adding this element to the array would cause it to be too large, break. The
+ // remainder of the tuples will be processed recursively at the end of this
+ // function.
+ if (valueBuilder->len() + ee.size() > BSONObjMaxUserSize) {
+ verify(n > 1); // if not, inf. loop
+ break;
+ }
- Scope * s = _func.scope();
+ valueBuilder->append(ee);
+ }
+ verify(valueBuilder);
+ valueBuilder->done();
+ BSONObj args = reduceArgs.obj();
- s->invokeSafe(_func.func(), &args, 0);
- ++numReduces;
+ Scope* s = _func.scope();
- if ( s->type( "__returnValue" ) == Array ) {
- uasserted( 10075 , "reduce -> multiple not supported yet");
- return;
- }
+ s->invokeSafe(_func.func(), &args, 0);
+ ++numReduces;
- endSizeEstimate = key.objsize() + ( args.objsize() / tuples.size() );
+ if (s->type("__returnValue") == Array) {
+ uasserted(10075, "reduce -> multiple not supported yet");
+ return;
+ }
- if ( n == tuples.size() )
- return;
+ endSizeEstimate = key.objsize() + (args.objsize() / tuples.size());
-            // the input list was too large, add the rest of the elements to new tuples and reduce again
-            // note: would be better to use a loop instead of recursion to avoid stack overflow
- BSONList x;
- for ( ; n < tuples.size(); n++ ) {
- x.push_back( tuples[n] );
- }
- BSONObjBuilder temp( endSizeEstimate );
- temp.append( key.firstElement() );
- s->append( temp , "1" , "__returnValue" );
- x.push_back( temp.obj() );
- _reduce( x , key , endSizeEstimate );
- }
+ if (n == tuples.size())
+ return;
- Config::Config( const string& _dbname , const BSONObj& cmdObj )
- {
- dbname = _dbname;
- ns = dbname + "." + cmdObj.firstElement().valuestrsafe();
+    // the input list was too large, add the rest of the elements to new tuples and reduce again
+    // note: would be better to use a loop instead of recursion to avoid stack overflow
+ BSONList x;
+ for (; n < tuples.size(); n++) {
+ x.push_back(tuples[n]);
+ }
+ BSONObjBuilder temp(endSizeEstimate);
+ temp.append(key.firstElement());
+ s->append(temp, "1", "__returnValue");
+ x.push_back(temp.obj());
+ _reduce(x, key, endSizeEstimate);
+}
- verbose = cmdObj["verbose"].trueValue();
- jsMode = cmdObj["jsMode"].trueValue();
- splitInfo = 0;
+Config::Config(const string& _dbname, const BSONObj& cmdObj) {
+ dbname = _dbname;
+ ns = dbname + "." + cmdObj.firstElement().valuestrsafe();
- if (cmdObj.hasField("splitInfo")) {
- splitInfo = cmdObj["splitInfo"].Int();
- }
+ verbose = cmdObj["verbose"].trueValue();
+ jsMode = cmdObj["jsMode"].trueValue();
+ splitInfo = 0;
- jsMaxKeys = 500000;
- reduceTriggerRatio = 10.0;
- maxInMemSize = 500 * 1024;
+ if (cmdObj.hasField("splitInfo")) {
+ splitInfo = cmdObj["splitInfo"].Int();
+ }
- uassert( 13602 , "outType is no longer a valid option" , cmdObj["outType"].eoo() );
+ jsMaxKeys = 500000;
+ reduceTriggerRatio = 10.0;
+ maxInMemSize = 500 * 1024;
- outputOptions = parseOutputOptions(dbname, cmdObj);
+ uassert(13602, "outType is no longer a valid option", cmdObj["outType"].eoo());
- shardedFirstPass = false;
- if (cmdObj.hasField("shardedFirstPass") && cmdObj["shardedFirstPass"].trueValue()){
- massert(16054,
- "shardedFirstPass should only use replace outType",
- outputOptions.outType == REPLACE);
- shardedFirstPass = true;
- }
+ outputOptions = parseOutputOptions(dbname, cmdObj);
-            if ( outputOptions.outType != INMEMORY ) { // set up temp collection name
- tempNamespace = str::stream()
- << (outputOptions.outDB.empty() ? dbname : outputOptions.outDB)
- << ".tmp.mr."
- << cmdObj.firstElement().String()
- << "_"
- << JOB_NUMBER.fetchAndAdd(1);
- incLong = tempNamespace + "_inc";
- }
+ shardedFirstPass = false;
+ if (cmdObj.hasField("shardedFirstPass") && cmdObj["shardedFirstPass"].trueValue()) {
+ massert(16054,
+ "shardedFirstPass should only use replace outType",
+ outputOptions.outType == REPLACE);
+ shardedFirstPass = true;
+ }
- {
- // scope and code
+    if (outputOptions.outType != INMEMORY) {  // set up temp collection name
+ tempNamespace = str::stream()
+ << (outputOptions.outDB.empty() ? dbname : outputOptions.outDB) << ".tmp.mr."
+ << cmdObj.firstElement().String() << "_" << JOB_NUMBER.fetchAndAdd(1);
+ incLong = tempNamespace + "_inc";
+ }
- if ( cmdObj["scope"].type() == Object )
- scopeSetup = cmdObj["scope"].embeddedObjectUserCheck();
+ {
+ // scope and code
- mapper.reset( new JSMapper( cmdObj["map"] ) );
- reducer.reset( new JSReducer( cmdObj["reduce"] ) );
- if ( cmdObj["finalize"].type() && cmdObj["finalize"].trueValue() )
- finalizer.reset( new JSFinalizer( cmdObj["finalize"] ) );
+ if (cmdObj["scope"].type() == Object)
+ scopeSetup = cmdObj["scope"].embeddedObjectUserCheck();
- if ( cmdObj["mapparams"].type() == Array ) {
- mapParams = cmdObj["mapparams"].embeddedObjectUserCheck();
- }
+ mapper.reset(new JSMapper(cmdObj["map"]));
+ reducer.reset(new JSReducer(cmdObj["reduce"]));
+ if (cmdObj["finalize"].type() && cmdObj["finalize"].trueValue())
+ finalizer.reset(new JSFinalizer(cmdObj["finalize"]));
- }
+ if (cmdObj["mapparams"].type() == Array) {
+ mapParams = cmdObj["mapparams"].embeddedObjectUserCheck();
+ }
+ }
- {
- // query options
- BSONElement q = cmdObj["query"];
- if ( q.type() == Object )
- filter = q.embeddedObjectUserCheck();
- else
- uassert( 13608 , "query has to be blank or an Object" , ! q.trueValue() );
+ {
+ // query options
+ BSONElement q = cmdObj["query"];
+ if (q.type() == Object)
+ filter = q.embeddedObjectUserCheck();
+ else
+ uassert(13608, "query has to be blank or an Object", !q.trueValue());
+
+
+ BSONElement s = cmdObj["sort"];
+ if (s.type() == Object)
+ sort = s.embeddedObjectUserCheck();
+ else
+ uassert(13609, "sort has to be blank or an Object", !s.trueValue());
+
+ if (cmdObj["limit"].isNumber())
+ limit = cmdObj["limit"].numberLong();
+ else
+ limit = 0;
+ }
+}
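
For orientation, a hypothetical command object exercising the fields Config() parses above; the collection name, function bodies, and query are invented for illustration.

    BSONObj cmdObj = BSON("mapreduce"
                          << "events"
                          << "map"
                          << "function() { emit(this.type, 1); }"
                          << "reduce"
                          << "function(key, values) { return Array.sum(values); }"
                          << "out" << BSON("inline" << 1) << "query"
                          << BSON("kind"
                                  << "click") << "limit" << 100);
    Config config("test", cmdObj);  // config.ns == "test.events"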
+/**
+ * Clean up the temporary and incremental collections
+ */
+void State::dropTempCollections() {
+ _db.dropCollection(_config.tempNamespace);
+ // Always forget about temporary namespaces, so we don't cache lots of them
+ ShardConnection::forgetNS(_config.tempNamespace);
+ if (_useIncremental) {
+ // We don't want to log the deletion of incLong as it isn't replicated. While
+ // harmless, this would lead to a scary looking warning on the secondaries.
+ bool shouldReplicateWrites = _txn->writesAreReplicated();
+ _txn->setReplicatedWrites(false);
+ ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, _txn, shouldReplicateWrites);
+
+ ScopedTransaction scopedXact(_txn, MODE_IX);
+ Lock::DBLock lk(_txn->lockState(), nsToDatabaseSubstring(_config.incLong), MODE_X);
+ if (Database* db = dbHolder().get(_txn, _config.incLong)) {
+ WriteUnitOfWork wunit(_txn);
+ db->dropCollection(_txn, _config.incLong);
+ wunit.commit();
+ }
- BSONElement s = cmdObj["sort"];
- if ( s.type() == Object )
- sort = s.embeddedObjectUserCheck();
- else
- uassert( 13609 , "sort has to be blank or an Object" , ! s.trueValue() );
+ ShardConnection::forgetNS(_config.incLong);
+ }
+}
- if ( cmdObj["limit"].isNumber() )
- limit = cmdObj["limit"].numberLong();
- else
- limit = 0;
- }
+/**
+ * Create temporary collection, set up indexes
+ */
+void State::prepTempCollection() {
+ if (!_onDisk)
+ return;
+
+ dropTempCollections();
+ if (_useIncremental) {
+ // Create the inc collection and make sure we have index on "0" key.
+ // Intentionally not replicating the inc collection to secondaries.
+ bool shouldReplicateWrites = _txn->writesAreReplicated();
+ _txn->setReplicatedWrites(false);
+ ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, _txn, shouldReplicateWrites);
+
+ OldClientWriteContext incCtx(_txn, _config.incLong);
+ WriteUnitOfWork wuow(_txn);
+ Collection* incColl = incCtx.getCollection();
+ invariant(!incColl);
+
+ CollectionOptions options;
+ options.setNoIdIndex();
+ options.temp = true;
+ incColl = incCtx.db()->createCollection(_txn, _config.incLong, options);
+ invariant(incColl);
+
+ BSONObj indexSpec = BSON("key" << BSON("0" << 1) << "ns" << _config.incLong << "name"
+ << "_temp_0");
+ Status status = incColl->getIndexCatalog()->createIndexOnEmptyCollection(_txn, indexSpec);
+ if (!status.isOK()) {
+ uasserted(17305,
+ str::stream() << "createIndex failed for mr incLong ns: " << _config.incLong
+ << " err: " << status.code());
}
+ wuow.commit();
+ }
- /**
- * Clean up the temporary and incremental collections
- */
- void State::dropTempCollections() {
- _db.dropCollection(_config.tempNamespace);
- // Always forget about temporary namespaces, so we don't cache lots of them
- ShardConnection::forgetNS( _config.tempNamespace );
- if (_useIncremental) {
- // We don't want to log the deletion of incLong as it isn't replicated. While
- // harmless, this would lead to a scary looking warning on the secondaries.
- bool shouldReplicateWrites = _txn->writesAreReplicated();
- _txn->setReplicatedWrites(false);
- ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, _txn, shouldReplicateWrites);
-
- ScopedTransaction scopedXact(_txn, MODE_IX);
- Lock::DBLock lk(_txn->lockState(),
- nsToDatabaseSubstring(_config.incLong),
- MODE_X);
- if (Database* db = dbHolder().get(_txn, _config.incLong)) {
- WriteUnitOfWork wunit(_txn);
- db->dropCollection(_txn, _config.incLong);
- wunit.commit();
+ CollectionOptions finalOptions;
+ vector<BSONObj> indexesToInsert;
+
+ {
+ // copy indexes and collection options into temporary storage
+ OldClientWriteContext finalCtx(_txn, _config.outputOptions.finalNamespace);
+ Collection* const finalColl = finalCtx.getCollection();
+ if (finalColl) {
+ finalOptions = finalColl->getCatalogEntry()->getCollectionOptions(_txn);
+
+ IndexCatalog::IndexIterator ii =
+ finalColl->getIndexCatalog()->getIndexIterator(_txn, true);
+ // Iterate over finalColl's indexes.
+ while (ii.more()) {
+ IndexDescriptor* currIndex = ii.next();
+ BSONObjBuilder b;
+ b.append("ns", _config.tempNamespace);
+
+ // Copy over contents of the index descriptor's infoObj.
+ BSONObjIterator j(currIndex->infoObj());
+ while (j.more()) {
+ BSONElement e = j.next();
+ if (str::equals(e.fieldName(), "_id") || str::equals(e.fieldName(), "ns"))
+ continue;
+ b.append(e);
}
-
- ShardConnection::forgetNS( _config.incLong );
+ indexesToInsert.push_back(b.obj());
}
-
}
+ }
- /**
- * Create temporary collection, set up indexes
- */
- void State::prepTempCollection() {
- if ( ! _onDisk )
- return;
-
- dropTempCollections();
- if (_useIncremental) {
- // Create the inc collection and make sure we have index on "0" key.
- // Intentionally not replicating the inc collection to secondaries.
- bool shouldReplicateWrites = _txn->writesAreReplicated();
- _txn->setReplicatedWrites(false);
- ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, _txn, shouldReplicateWrites);
-
- OldClientWriteContext incCtx(_txn, _config.incLong);
- WriteUnitOfWork wuow(_txn);
- Collection* incColl = incCtx.getCollection();
- invariant(!incColl);
-
- CollectionOptions options;
- options.setNoIdIndex();
- options.temp = true;
- incColl = incCtx.db()->createCollection(_txn, _config.incLong, options);
- invariant(incColl);
-
- BSONObj indexSpec = BSON( "key" << BSON( "0" << 1 ) << "ns" << _config.incLong
- << "name" << "_temp_0" );
- Status status = incColl->getIndexCatalog()->createIndexOnEmptyCollection(_txn,
- indexSpec);
- if ( !status.isOK() ) {
- uasserted( 17305 , str::stream() << "createIndex failed for mr incLong ns: " <<
- _config.incLong << " err: " << status.code() );
+ {
+ // create temp collection and insert the indexes from temporary storage
+ OldClientWriteContext tempCtx(_txn, _config.tempNamespace);
+ WriteUnitOfWork wuow(_txn);
+ NamespaceString tempNss(_config.tempNamespace);
+ uassert(ErrorCodes::NotMaster,
+ "no longer master",
+ repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(tempNss));
+ Collection* tempColl = tempCtx.getCollection();
+ invariant(!tempColl);
+
+ CollectionOptions options = finalOptions;
+ options.temp = true;
+ tempColl = tempCtx.db()->createCollection(_txn, _config.tempNamespace, options);
+
+ for (vector<BSONObj>::iterator it = indexesToInsert.begin(); it != indexesToInsert.end();
+ ++it) {
+ Status status = tempColl->getIndexCatalog()->createIndexOnEmptyCollection(_txn, *it);
+ if (!status.isOK()) {
+ if (status.code() == ErrorCodes::IndexAlreadyExists) {
+ continue;
}
- wuow.commit();
+ uassertStatusOK(status);
}
+ // Log the createIndex operation.
+ string logNs = nsToDatabase(_config.tempNamespace) + ".system.indexes";
+ getGlobalServiceContext()->getOpObserver()->onCreateIndex(_txn, logNs, *it);
+ }
+ wuow.commit();
+ }
+}
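
The index-copying loop above rewrites each spec for the temp collection. A hedged sketch with an invented source spec: every field is preserved except "ns", which is re-pointed at the temp namespace, and "_id".

    BSONObj srcSpec = BSON("v" << 1 << "key" << BSON("a" << 1) << "name"
                               << "a_1"
                               << "ns"
                               << "test.events");
    BSONObjBuilder b;
    b.append("ns", "test.tmp.mr.events_0");  // invented temp namespace
    BSONObjIterator j(srcSpec);
    while (j.more()) {
        BSONElement e = j.next();
        if (str::equals(e.fieldName(), "_id") || str::equals(e.fieldName(), "ns"))
            continue;
        b.append(e);
    }
    BSONObj tempSpec = b.obj();
    // { ns: "test.tmp.mr.events_0", v: 1, key: { a: 1 }, name: "a_1" }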
- CollectionOptions finalOptions;
- vector<BSONObj> indexesToInsert;
-
- {
- // copy indexes and collection options into temporary storage
- OldClientWriteContext finalCtx(_txn, _config.outputOptions.finalNamespace);
- Collection* const finalColl = finalCtx.getCollection();
- if ( finalColl ) {
- finalOptions = finalColl->getCatalogEntry()->getCollectionOptions(_txn);
-
- IndexCatalog::IndexIterator ii =
- finalColl->getIndexCatalog()->getIndexIterator( _txn, true );
- // Iterate over finalColl's indexes.
- while ( ii.more() ) {
- IndexDescriptor* currIndex = ii.next();
- BSONObjBuilder b;
- b.append( "ns" , _config.tempNamespace );
-
- // Copy over contents of the index descriptor's infoObj.
- BSONObjIterator j( currIndex->infoObj() );
- while ( j.more() ) {
- BSONElement e = j.next();
- if ( str::equals( e.fieldName() , "_id" ) ||
- str::equals( e.fieldName() , "ns" ) )
- continue;
- b.append( e );
- }
- indexesToInsert.push_back( b.obj() );
- }
- }
- }
+/**
+ * For inline mode, appends results to output object.
+ * Makes sure (key, value) tuple is formatted as {_id: key, value: val}
+ */
+void State::appendResults(BSONObjBuilder& final) {
+ if (_onDisk) {
+ if (!_config.outputOptions.outDB.empty()) {
+ BSONObjBuilder loc;
+ if (!_config.outputOptions.outDB.empty())
+ loc.append("db", _config.outputOptions.outDB);
+ if (!_config.outputOptions.collectionName.empty())
+ loc.append("collection", _config.outputOptions.collectionName);
+ final.append("result", loc.obj());
+ } else {
+ if (!_config.outputOptions.collectionName.empty())
+ final.append("result", _config.outputOptions.collectionName);
+ }
- {
- // create temp collection and insert the indexes from temporary storage
- OldClientWriteContext tempCtx(_txn, _config.tempNamespace);
- WriteUnitOfWork wuow(_txn);
- NamespaceString tempNss(_config.tempNamespace);
- uassert(ErrorCodes::NotMaster, "no longer master",
- repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(tempNss));
- Collection* tempColl = tempCtx.getCollection();
- invariant(!tempColl);
-
- CollectionOptions options = finalOptions;
- options.temp = true;
- tempColl = tempCtx.db()->createCollection(_txn, _config.tempNamespace, options);
-
- for ( vector<BSONObj>::iterator it = indexesToInsert.begin();
- it != indexesToInsert.end(); ++it ) {
- Status status =
- tempColl->getIndexCatalog()->createIndexOnEmptyCollection(_txn, *it);
- if (!status.isOK()) {
- if (status.code() == ErrorCodes::IndexAlreadyExists) {
- continue;
- }
- uassertStatusOK(status);
- }
- // Log the createIndex operation.
- string logNs = nsToDatabase( _config.tempNamespace ) + ".system.indexes";
- getGlobalServiceContext()->getOpObserver()->onCreateIndex(_txn, logNs, *it);
- }
- wuow.commit();
+ if (_config.splitInfo > 0) {
+ // add split points, used for shard
+ BSONObj res;
+ BSONObj idKey = BSON("_id" << 1);
+ if (!_db.runCommand("admin",
+ BSON("splitVector" << _config.outputOptions.finalNamespace
+ << "keyPattern" << idKey << "maxChunkSizeBytes"
+ << _config.splitInfo),
+ res)) {
+ uasserted(15921, str::stream() << "splitVector failed: " << res);
}
-
+ if (res.hasField("splitKeys"))
+ final.append(res.getField("splitKeys"));
}
+ return;
+ }
- /**
- * For inline mode, appends results to output object.
- * Makes sure (key, value) tuple is formatted as {_id: key, value: val}
- */
- void State::appendResults( BSONObjBuilder& final ) {
- if ( _onDisk ) {
- if (!_config.outputOptions.outDB.empty()) {
- BSONObjBuilder loc;
- if ( !_config.outputOptions.outDB.empty())
- loc.append( "db" , _config.outputOptions.outDB );
- if ( !_config.outputOptions.collectionName.empty() )
- loc.append( "collection" , _config.outputOptions.collectionName );
- final.append("result", loc.obj());
- }
- else {
- if ( !_config.outputOptions.collectionName.empty() )
- final.append( "result" , _config.outputOptions.collectionName );
- }
+ if (_jsMode) {
+ ScriptingFunction getResult = _scope->createFunction(
+ "var map = _mrMap;"
+ "var result = [];"
+ "for (key in map) {"
+ " result.push({_id: key, value: map[key]});"
+ "}"
+ "return result;");
+ _scope->invoke(getResult, 0, 0, 0, false);
+ BSONObj obj = _scope->getObject("__returnValue");
+ final.append("results", BSONArray(obj));
+ return;
+ }
- if ( _config.splitInfo > 0 ) {
- // add split points, used for shard
- BSONObj res;
- BSONObj idKey = BSON( "_id" << 1 );
- if (!_db.runCommand("admin",
- BSON("splitVector" << _config.outputOptions.finalNamespace
- << "keyPattern" << idKey
- << "maxChunkSizeBytes" << _config.splitInfo),
- res)) {
- uasserted( 15921 , str::stream() << "splitVector failed: " << res );
- }
- if ( res.hasField( "splitKeys" ) )
- final.append( res.getField( "splitKeys" ) );
- }
- return;
- }
+ uassert(13604, "too much data for in memory map/reduce", _size < BSONObjMaxUserSize);
- if (_jsMode) {
- ScriptingFunction getResult = _scope->createFunction(
- "var map = _mrMap;"
- "var result = [];"
- "for (key in map) {"
- " result.push({_id: key, value: map[key]});"
- "}"
- "return result;");
- _scope->invoke(getResult, 0, 0, 0, false);
- BSONObj obj = _scope->getObject("__returnValue");
- final.append("results", BSONArray(obj));
- return;
- }
+ BSONArrayBuilder b((int)(_size * 1.2)); // _size is data size, doesn't count overhead and keys
- uassert( 13604 , "too much data for in memory map/reduce" , _size < BSONObjMaxUserSize );
+ for (InMemory::iterator i = _temp->begin(); i != _temp->end(); ++i) {
+ BSONObj key = i->first;
+ BSONList& all = i->second;
- BSONArrayBuilder b( (int)(_size * 1.2) ); // _size is data size, doesn't count overhead and keys
+ verify(all.size() == 1);
- for ( InMemory::iterator i=_temp->begin(); i!=_temp->end(); ++i ) {
- BSONObj key = i->first;
- BSONList& all = i->second;
+ BSONObjIterator vi(all[0]);
+ vi.next();
- verify( all.size() == 1 );
+ BSONObjBuilder temp(b.subobjStart());
+ temp.appendAs(key.firstElement(), "_id");
+ temp.appendAs(vi.next(), "value");
+ temp.done();
+ }
- BSONObjIterator vi( all[0] );
- vi.next();
+ BSONArray res = b.arr();
+ final.append("results", res);
+}
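
A small illustration of the inline reply shape appendResults() produces above, using only builder calls visible in the diff; the key and value are invented.

    BSONObjBuilder final;
    BSONArrayBuilder results;
    {
        BSONObjBuilder entry(results.subobjStart());
        entry.append("_id", "apples");
        entry.append("value", 3);
        entry.done();
    }
    final.append("results", results.arr());
    // final.obj() == { results: [ { _id: "apples", value: 3 } ] }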
- BSONObjBuilder temp( b.subobjStart() );
- temp.appendAs( key.firstElement() , "_id" );
- temp.appendAs( vi.next() , "value" );
- temp.done();
- }
+/**
+ * Does post processing on output collection.
+ * This may involve replacing, merging or reducing.
+ */
+long long State::postProcessCollection(OperationContext* txn, CurOp* op, ProgressMeterHolder& pm) {
+ if (_onDisk == false || _config.outputOptions.outType == Config::INMEMORY)
+ return numInMemKeys();
- BSONArray res = b.arr();
- final.append( "results" , res );
- }
+ if (_config.outputOptions.outNonAtomic)
+ return postProcessCollectionNonAtomic(txn, op, pm);
- /**
- * Does post processing on output collection.
- * This may involve replacing, merging or reducing.
- */
- long long State::postProcessCollection(
- OperationContext* txn, CurOp* op, ProgressMeterHolder& pm) {
+ invariant(!txn->lockState()->isLocked());
- if ( _onDisk == false || _config.outputOptions.outType == Config::INMEMORY )
- return numInMemKeys();
+ ScopedTransaction transaction(txn, MODE_X);
+ Lock::GlobalWrite lock(
+ txn->lockState()); // TODO(erh): this is how it was, but seems it doesn't need to be global
+ return postProcessCollectionNonAtomic(txn, op, pm);
+}
- if (_config.outputOptions.outNonAtomic)
- return postProcessCollectionNonAtomic(txn, op, pm);
+//
+// For SERVER-6116 - can't handle version errors in count currently
+//
- invariant( !txn->lockState()->isLocked() );
+/**
+ * Runs count and disables version errors.
+ *
+ * TODO: make count work with versioning
+ */
+unsigned long long _safeCount(Client* client,
+ // Can't be const b/c count isn't
+ /* const */ DBDirectClient& db,
+ const string& ns,
+ const BSONObj& query = BSONObj(),
+ int options = 0,
+ int limit = 0,
+ int skip = 0) {
+ ShardForceVersionOkModeBlock ignoreVersion(client); // ignore versioning here
+ return db.count(ns, query, options, limit, skip);
+}
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite lock(txn->lockState()); // TODO(erh): this is how it was, but seems it doesn't need to be global
- return postProcessCollectionNonAtomic(txn, op, pm);
+//
+// End SERVER-6116
+//
+
+long long State::postProcessCollectionNonAtomic(OperationContext* txn,
+ CurOp* op,
+ ProgressMeterHolder& pm) {
+ auto client = txn->getClient();
+
+ if (_config.outputOptions.finalNamespace == _config.tempNamespace)
+ return _safeCount(client, _db, _config.outputOptions.finalNamespace);
+
+ if (_config.outputOptions.outType == Config::REPLACE ||
+ _safeCount(client, _db, _config.outputOptions.finalNamespace) == 0) {
+ ScopedTransaction transaction(txn, MODE_X);
+ Lock::GlobalWrite lock(txn->lockState()); // TODO(erh): why global???
+ // replace: just rename from temp to final collection name, dropping previous collection
+ _db.dropCollection(_config.outputOptions.finalNamespace);
+ BSONObj info;
+
+ if (!_db.runCommand("admin",
+ BSON("renameCollection" << _config.tempNamespace << "to"
+ << _config.outputOptions.finalNamespace
+ << "stayTemp" << _config.shardedFirstPass),
+ info)) {
+ uasserted(10076, str::stream() << "rename failed: " << info);
}
- //
- // For SERVER-6116 - can't handle version errors in count currently
- //
-
- /**
- * Runs count and disables version errors.
- *
- * TODO: make count work with versioning
- */
- unsigned long long _safeCount( Client* client,
- // Can't be const b/c count isn't
- /* const */ DBDirectClient& db,
- const string &ns,
- const BSONObj& query = BSONObj(),
- int options = 0,
- int limit = 0,
- int skip = 0 )
+ _db.dropCollection(_config.tempNamespace);
+ } else if (_config.outputOptions.outType == Config::MERGE) {
+ // merge: upsert new docs into old collection
{
- ShardForceVersionOkModeBlock ignoreVersion(client); // ignore versioning here
- return db.count( ns, query, options, limit, skip );
+ const auto count = _safeCount(client, _db, _config.tempNamespace, BSONObj());
+ stdx::lock_guard<Client> lk(*txn->getClient());
+ op->setMessage_inlock(
+ "m/r: merge post processing", "M/R Merge Post Processing Progress", count);
}
+ unique_ptr<DBClientCursor> cursor = _db.query(_config.tempNamespace, BSONObj());
+ while (cursor->more()) {
+ ScopedTransaction scopedXact(_txn, MODE_IX);
+ Lock::DBLock lock(_txn->lockState(),
+ nsToDatabaseSubstring(_config.outputOptions.finalNamespace),
+ MODE_X);
+ BSONObj o = cursor->nextSafe();
+ Helpers::upsert(_txn, _config.outputOptions.finalNamespace, o);
+ pm.hit();
+ }
+ _db.dropCollection(_config.tempNamespace);
+ pm.finished();
+ } else if (_config.outputOptions.outType == Config::REDUCE) {
+ // reduce: apply reduce op on new result and existing one
+ BSONList values;
- //
- // End SERVER-6116
- //
-
- long long State::postProcessCollectionNonAtomic(
- OperationContext* txn, CurOp* op, ProgressMeterHolder& pm) {
-
- auto client = txn->getClient();
-
- if ( _config.outputOptions.finalNamespace == _config.tempNamespace )
- return _safeCount( client, _db, _config.outputOptions.finalNamespace );
-
- if (_config.outputOptions.outType == Config::REPLACE ||
- _safeCount(client, _db, _config.outputOptions.finalNamespace) == 0) {
-
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite lock(txn->lockState()); // TODO(erh): why global???
- // replace: just rename from temp to final collection name, dropping previous collection
- _db.dropCollection( _config.outputOptions.finalNamespace );
- BSONObj info;
+ {
+ const auto count = _safeCount(client, _db, _config.tempNamespace, BSONObj());
+ stdx::lock_guard<Client> lk(*txn->getClient());
+ op->setMessage_inlock(
+ "m/r: reduce post processing", "M/R Reduce Post Processing Progress", count);
+ }
+ unique_ptr<DBClientCursor> cursor = _db.query(_config.tempNamespace, BSONObj());
+ while (cursor->more()) {
+ ScopedTransaction transaction(txn, MODE_X);
+ Lock::GlobalWrite lock(txn->lockState()); // TODO(erh) why global?
+ BSONObj temp = cursor->nextSafe();
+ BSONObj old;
- if ( ! _db.runCommand( "admin"
- , BSON( "renameCollection" << _config.tempNamespace <<
- "to" << _config.outputOptions.finalNamespace <<
- "stayTemp" << _config.shardedFirstPass )
- , info ) ) {
- uasserted( 10076 , str::stream() << "rename failed: " << info );
- }
-
- _db.dropCollection( _config.tempNamespace );
- }
- else if ( _config.outputOptions.outType == Config::MERGE ) {
- // merge: upsert new docs into old collection
- {
- const auto count = _safeCount(client, _db, _config.tempNamespace, BSONObj());
- stdx::lock_guard<Client> lk(*txn->getClient());
- op->setMessage_inlock("m/r: merge post processing",
- "M/R Merge Post Processing Progress",
- count);
- }
- unique_ptr<DBClientCursor> cursor = _db.query(_config.tempNamespace , BSONObj());
- while (cursor->more()) {
- ScopedTransaction scopedXact(_txn, MODE_IX);
- Lock::DBLock lock(_txn->lockState(),
- nsToDatabaseSubstring(_config.outputOptions.finalNamespace),
- MODE_X);
- BSONObj o = cursor->nextSafe();
- Helpers::upsert( _txn, _config.outputOptions.finalNamespace , o );
- pm.hit();
- }
- _db.dropCollection( _config.tempNamespace );
- pm.finished();
+ bool found;
+ {
+ const std::string& finalNamespace = _config.outputOptions.finalNamespace;
+ OldClientContext tx(txn, finalNamespace);
+ Collection* coll = getCollectionOrUassert(tx.db(), finalNamespace);
+ found = Helpers::findOne(_txn, coll, temp["_id"].wrap(), old, true);
}
- else if ( _config.outputOptions.outType == Config::REDUCE ) {
- // reduce: apply reduce op on new result and existing one
- BSONList values;
-
- {
- const auto count = _safeCount(client, _db, _config.tempNamespace, BSONObj());
- stdx::lock_guard<Client> lk(*txn->getClient());
- op->setMessage_inlock("m/r: reduce post processing",
- "M/R Reduce Post Processing Progress",
- count);
- }
- unique_ptr<DBClientCursor> cursor = _db.query( _config.tempNamespace , BSONObj() );
- while ( cursor->more() ) {
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite lock(txn->lockState()); // TODO(erh) why global?
- BSONObj temp = cursor->nextSafe();
- BSONObj old;
-
- bool found;
- {
- const std::string& finalNamespace = _config.outputOptions.finalNamespace;
- OldClientContext tx(txn, finalNamespace);
- Collection* coll = getCollectionOrUassert(tx.db(), finalNamespace);
- found = Helpers::findOne(_txn,
- coll,
- temp["_id"].wrap(),
- old,
- true);
- }
- if ( found ) {
- // need to reduce
- values.clear();
- values.push_back( temp );
- values.push_back( old );
- Helpers::upsert(_txn,
- _config.outputOptions.finalNamespace,
- _config.reducer->finalReduce(values,
- _config.finalizer.get()));
- }
- else {
- Helpers::upsert( _txn, _config.outputOptions.finalNamespace , temp );
- }
- pm.hit();
- }
- pm.finished();
+ if (found) {
+ // need to reduce
+ values.clear();
+ values.push_back(temp);
+ values.push_back(old);
+ Helpers::upsert(_txn,
+ _config.outputOptions.finalNamespace,
+ _config.reducer->finalReduce(values, _config.finalizer.get()));
+ } else {
+ Helpers::upsert(_txn, _config.outputOptions.finalNamespace, temp);
}
-
- return _safeCount( txn->getClient(), _db, _config.outputOptions.finalNamespace );
+ pm.hit();
}
+ pm.finished();
+ }
- /**
- * Insert doc in collection. This should be replicated.
- */
- void State::insert( const string& ns , const BSONObj& o ) {
- verify( _onDisk );
+ return _safeCount(txn->getClient(), _db, _config.outputOptions.finalNamespace);
+}
+/**
+ * Inserts a doc into the collection. This write should be replicated.
+ */
+void State::insert(const string& ns, const BSONObj& o) {
+ verify(_onDisk);
- OldClientWriteContext ctx(_txn, ns );
- WriteUnitOfWork wuow(_txn);
- NamespaceString nss(ns);
- uassert(ErrorCodes::NotMaster, "no longer master",
- repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(nss));
- Collection* coll = getCollectionOrUassert(ctx.db(), ns);
- BSONObjBuilder b;
- if ( !o.hasField( "_id" ) ) {
- b.appendOID( "_id", NULL, true );
- }
- b.appendElements(o);
- BSONObj bo = b.obj();
+ OldClientWriteContext ctx(_txn, ns);
+ WriteUnitOfWork wuow(_txn);
+ NamespaceString nss(ns);
+ uassert(ErrorCodes::NotMaster,
+ "no longer master",
+ repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(nss));
+ Collection* coll = getCollectionOrUassert(ctx.db(), ns);
- uassertStatusOK( coll->insertDocument( _txn, bo, true ).getStatus() );
- wuow.commit();
- }
+ BSONObjBuilder b;
+ if (!o.hasField("_id")) {
+ b.appendOID("_id", NULL, true);
+ }
+ b.appendElements(o);
+ BSONObj bo = b.obj();
- /**
- * Insert doc into the inc collection. This should not be replicated.
- */
- void State::_insertToInc( BSONObj& o ) {
- verify( _onDisk );
-
- OldClientWriteContext ctx(_txn, _config.incLong );
- WriteUnitOfWork wuow(_txn);
- Collection* coll = getCollectionOrUassert(ctx.db(), _config.incLong);
- bool shouldReplicateWrites = _txn->writesAreReplicated();
- _txn->setReplicatedWrites(false);
- ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, _txn, shouldReplicateWrites);
- uassertStatusOK(coll->insertDocument(_txn, o, true, false).getStatus());
- wuow.commit();
- }
+ uassertStatusOK(coll->insertDocument(_txn, bo, true).getStatus());
+ wuow.commit();
+}
- State::State(OperationContext* txn, const Config& c) :
- _config(c),
- _db(txn),
- _useIncremental(true),
- _txn(txn),
- _size(0),
- _dupCount(0),
- _numEmits(0) {
- _temp.reset( new InMemory() );
- _onDisk = _config.outputOptions.outType != Config::INMEMORY;
- }
+/**
+ * Inserts a doc into the inc collection. This write should not be replicated.
+ */
+void State::_insertToInc(BSONObj& o) {
+ verify(_onDisk);
+
+ OldClientWriteContext ctx(_txn, _config.incLong);
+ WriteUnitOfWork wuow(_txn);
+ Collection* coll = getCollectionOrUassert(ctx.db(), _config.incLong);
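+    // Temporarily disable replication for this write; ON_BLOCK_EXIT restores the
+    // previous setting even if the insert throws.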
+ bool shouldReplicateWrites = _txn->writesAreReplicated();
+ _txn->setReplicatedWrites(false);
+ ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, _txn, shouldReplicateWrites);
+ uassertStatusOK(coll->insertDocument(_txn, o, true, false).getStatus());
+ wuow.commit();
+}
- bool State::sourceExists() {
- return _db.exists( _config.ns );
- }
+State::State(OperationContext* txn, const Config& c)
+ : _config(c), _db(txn), _useIncremental(true), _txn(txn), _size(0), _dupCount(0), _numEmits(0) {
+ _temp.reset(new InMemory());
+ _onDisk = _config.outputOptions.outType != Config::INMEMORY;
+}
- long long State::incomingDocuments() {
- return _safeCount( _txn->getClient(), _db, _config.ns , _config.filter , QueryOption_SlaveOk , (unsigned) _config.limit );
- }
+bool State::sourceExists() {
+ return _db.exists(_config.ns);
+}
- State::~State() {
- if ( _onDisk ) {
- try {
- dropTempCollections();
- }
- catch ( std::exception& e ) {
- error() << "couldn't cleanup after map reduce: " << e.what() << endl;
- }
- }
- if (_scope && !_scope->isKillPending() && _scope->getError().empty()) {
- // cleanup js objects
- try {
- ScriptingFunction cleanup =
- _scope->createFunction("delete _emitCt; delete _keyCt; delete _mrMap;");
- _scope->invoke(cleanup, 0, 0, 0, true);
- }
- catch (const DBException &) {
- // not important because properties will be reset if scope is reused
- LOG(1) << "MapReduce terminated during state destruction" << endl;
- }
- }
- }
+long long State::incomingDocuments() {
+ return _safeCount(_txn->getClient(),
+ _db,
+ _config.ns,
+ _config.filter,
+ QueryOption_SlaveOk,
+ (unsigned)_config.limit);
+}
- /**
- * Initialize the mapreduce operation, creating the inc collection
- */
- void State::init() {
- // setup js
- const string userToken = AuthorizationSession::get(ClientBasic::getCurrent())
- ->getAuthenticatedUserNamesToken();
- _scope.reset(globalScriptEngine->getPooledScope(
- _txn, _config.dbname, "mapreduce" + userToken).release());
-
- if ( ! _config.scopeSetup.isEmpty() )
- _scope->init( &_config.scopeSetup );
-
- _config.mapper->init( this );
- _config.reducer->init( this );
- if ( _config.finalizer )
- _config.finalizer->init( this );
- _scope->setBoolean("_doFinal", _config.finalizer.get() != 0);
-
- switchMode(_config.jsMode); // set up js-mode based on Config
-
- // global JS map/reduce hashmap
- // we use a standard JS object which means keys are only simple types
- // we could also add a real hashmap from a library and object comparison methods
- // for increased performance, we may want to look at v8 Harmony Map support
- // _scope->setObject("_mrMap", BSONObj(), false);
- ScriptingFunction init = _scope->createFunction(
- "_emitCt = 0;"
- "_keyCt = 0;"
- "_dupCt = 0;"
- "_redCt = 0;"
- "if (typeof(_mrMap) === 'undefined') {"
- " _mrMap = {};"
- "}");
- _scope->invoke(init, 0, 0, 0, true);
-
- // js function to run reduce on all keys
- // redfunc = _scope->createFunction("for (var key in hashmap) { print('Key is ' + key); list = hashmap[key]; ret = reduce(key, list); print('Value is ' + ret); };");
- _reduceAll = _scope->createFunction(
- "var map = _mrMap;"
- "var list, ret;"
- "for (var key in map) {"
- " list = map[key];"
- " if (list.length != 1) {"
- " ret = _reduce(key, list);"
- " map[key] = [ret];"
- " ++_redCt;"
- " }"
- "}"
- "_dupCt = 0;");
- massert(16717, "error initializing JavaScript reduceAll function",
- _reduceAll != 0);
-
- _reduceAndEmit = _scope->createFunction(
- "var map = _mrMap;"
- "var list, ret;"
- "for (var key in map) {"
- " list = map[key];"
- " if (list.length == 1)"
- " ret = list[0];"
- " else {"
- " ret = _reduce(key, list);"
- " ++_redCt;"
- " }"
- " emit(key, ret);"
- "}"
- "delete _mrMap;");
- massert(16718, "error initializing JavaScript reduce/emit function",
- _reduceAndEmit != 0);
-
- _reduceAndFinalize = _scope->createFunction(
- "var map = _mrMap;"
- "var list, ret;"
- "for (var key in map) {"
- " list = map[key];"
- " if (list.length == 1) {"
- " if (!_doFinal) { continue; }"
- " ret = list[0];"
- " }"
- " else {"
- " ret = _reduce(key, list);"
- " ++_redCt;"
- " }"
- " if (_doFinal)"
- " ret = _finalize(key, ret);"
- " map[key] = ret;"
- "}");
- massert(16719, "error creating JavaScript reduce/finalize function",
- _reduceAndFinalize != 0);
-
- _reduceAndFinalizeAndInsert = _scope->createFunction(
- "var map = _mrMap;"
- "var list, ret;"
- "for (var key in map) {"
- " list = map[key];"
- " if (list.length == 1)"
- " ret = list[0];"
- " else {"
- " ret = _reduce(key, list);"
- " ++_redCt;"
- " }"
- " if (_doFinal)"
- " ret = _finalize(key, ret);"
- " _nativeToTemp({_id: key, value: ret});"
- "}");
- massert(16720, "error initializing JavaScript functions",
- _reduceAndFinalizeAndInsert != 0);
+State::~State() {
+ if (_onDisk) {
+ try {
+ dropTempCollections();
+ } catch (std::exception& e) {
+            error() << "couldn't clean up after map reduce: " << e.what() << endl;
}
-
- void State::switchMode(bool jsMode) {
- _jsMode = jsMode;
- if (jsMode) {
- // emit function that stays in JS
- _scope->setFunction("emit",
- "function(key, value) {"
- " if (typeof(key) === 'object') {"
- " _bailFromJS(key, value);"
- " return;"
- " }"
- " ++_emitCt;"
- " var map = _mrMap;"
- " var list = map[key];"
- " if (!list) {"
- " ++_keyCt;"
- " list = [];"
- " map[key] = list;"
- " }"
- " else"
- " ++_dupCt;"
- " list.push(value);"
- "}");
- _scope->injectNative("_bailFromJS", _bailFromJS, this);
- }
- else {
- // emit now populates C++ map
- _scope->injectNative( "emit" , fast_emit, this );
- }
+ }
+ if (_scope && !_scope->isKillPending() && _scope->getError().empty()) {
+ // cleanup js objects
+ try {
+ ScriptingFunction cleanup =
+ _scope->createFunction("delete _emitCt; delete _keyCt; delete _mrMap;");
+ _scope->invoke(cleanup, 0, 0, 0, true);
+ } catch (const DBException&) {
+ // not important because properties will be reset if scope is reused
+ LOG(1) << "MapReduce terminated during state destruction" << endl;
}
+ }
+}
- void State::bailFromJS() {
- LOG(1) << "M/R: Switching from JS mode to mixed mode" << endl;
+/**
+ * Initialize the mapreduce operation, creating the inc collection
+ */
+void State::init() {
+ // setup js
+ const string userToken =
+ AuthorizationSession::get(ClientBasic::getCurrent())->getAuthenticatedUserNamesToken();
+ _scope.reset(globalScriptEngine->getPooledScope(_txn, _config.dbname, "mapreduce" + userToken)
+ .release());
+
+ if (!_config.scopeSetup.isEmpty())
+ _scope->init(&_config.scopeSetup);
+
+ _config.mapper->init(this);
+ _config.reducer->init(this);
+ if (_config.finalizer)
+ _config.finalizer->init(this);
+ _scope->setBoolean("_doFinal", _config.finalizer.get() != 0);
+
+ switchMode(_config.jsMode); // set up js-mode based on Config
+
+ // global JS map/reduce hashmap
+ // we use a standard JS object which means keys are only simple types
+ // we could also add a real hashmap from a library and object comparison methods
+ // for increased performance, we may want to look at v8 Harmony Map support
+ // _scope->setObject("_mrMap", BSONObj(), false);
+ ScriptingFunction init = _scope->createFunction(
+ "_emitCt = 0;"
+ "_keyCt = 0;"
+ "_dupCt = 0;"
+ "_redCt = 0;"
+ "if (typeof(_mrMap) === 'undefined') {"
+ " _mrMap = {};"
+ "}");
+ _scope->invoke(init, 0, 0, 0, true);
+
+ // js function to run reduce on all keys
+ // redfunc = _scope->createFunction("for (var key in hashmap) { print('Key is ' + key); list = hashmap[key]; ret = reduce(key, list); print('Value is ' + ret); };");
+ _reduceAll = _scope->createFunction(
+ "var map = _mrMap;"
+ "var list, ret;"
+ "for (var key in map) {"
+ " list = map[key];"
+ " if (list.length != 1) {"
+ " ret = _reduce(key, list);"
+ " map[key] = [ret];"
+ " ++_redCt;"
+ " }"
+ "}"
+ "_dupCt = 0;");
+ massert(16717, "error initializing JavaScript reduceAll function", _reduceAll != 0);
+
+ _reduceAndEmit = _scope->createFunction(
+ "var map = _mrMap;"
+ "var list, ret;"
+ "for (var key in map) {"
+ " list = map[key];"
+ " if (list.length == 1)"
+ " ret = list[0];"
+ " else {"
+ " ret = _reduce(key, list);"
+ " ++_redCt;"
+ " }"
+ " emit(key, ret);"
+ "}"
+ "delete _mrMap;");
+ massert(16718, "error initializing JavaScript reduce/emit function", _reduceAndEmit != 0);
+
+ _reduceAndFinalize = _scope->createFunction(
+ "var map = _mrMap;"
+ "var list, ret;"
+ "for (var key in map) {"
+ " list = map[key];"
+ " if (list.length == 1) {"
+ " if (!_doFinal) { continue; }"
+ " ret = list[0];"
+ " }"
+ " else {"
+ " ret = _reduce(key, list);"
+ " ++_redCt;"
+ " }"
+ " if (_doFinal)"
+ " ret = _finalize(key, ret);"
+ " map[key] = ret;"
+ "}");
+ massert(16719, "error creating JavaScript reduce/finalize function", _reduceAndFinalize != 0);
+
+ _reduceAndFinalizeAndInsert = _scope->createFunction(
+ "var map = _mrMap;"
+ "var list, ret;"
+ "for (var key in map) {"
+ " list = map[key];"
+ " if (list.length == 1)"
+ " ret = list[0];"
+ " else {"
+ " ret = _reduce(key, list);"
+ " ++_redCt;"
+ " }"
+ " if (_doFinal)"
+ " ret = _finalize(key, ret);"
+ " _nativeToTemp({_id: key, value: ret});"
+ "}");
+ massert(16720, "error initializing JavaScript functions", _reduceAndFinalizeAndInsert != 0);
+}
- // reduce and reemit into c++
- switchMode(false);
- _scope->invoke(_reduceAndEmit, 0, 0, 0, true);
- // need to get the real number emitted so far
- _numEmits = _scope->getNumberInt("_emitCt");
- _config.reducer->numReduces = _scope->getNumberInt("_redCt");
- }
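+/**
+ * Switches the emit implementation: in JS mode, emit() accumulates into the
+ * JS _mrMap object; otherwise fast_emit populates the C++ in-memory map.
+ */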
+void State::switchMode(bool jsMode) {
+ _jsMode = jsMode;
+ if (jsMode) {
+ // emit function that stays in JS
+ _scope->setFunction("emit",
+ "function(key, value) {"
+ " if (typeof(key) === 'object') {"
+ " _bailFromJS(key, value);"
+ " return;"
+ " }"
+ " ++_emitCt;"
+ " var map = _mrMap;"
+ " var list = map[key];"
+ " if (!list) {"
+ " ++_keyCt;"
+ " list = [];"
+ " map[key] = list;"
+ " }"
+ " else"
+ " ++_dupCt;"
+ " list.push(value);"
+ "}");
+ _scope->injectNative("_bailFromJS", _bailFromJS, this);
+ } else {
+ // emit now populates C++ map
+ _scope->injectNative("emit", fast_emit, this);
+ }
+}
- Collection* State::getCollectionOrUassert(Database* db, StringData ns) {
- Collection* out = db ? db->getCollection(ns) : NULL;
- uassert(18697, "Collection unexpectedly disappeared: " + ns.toString(),
- out);
- return out;
- }
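+/**
+ * Leaves pure JS mode for mixed mode: runs the pending JS reduce, re-emits the
+ * results into the C++ map, and captures the emit/reduce counters from JS.
+ */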
+void State::bailFromJS() {
+ LOG(1) << "M/R: Switching from JS mode to mixed mode" << endl;
- /**
- * Applies last reduce and finalize on a list of tuples (key, val)
- * Inserts single result {_id: key, value: val} into temp collection
- */
- void State::finalReduce( BSONList& values ) {
- if ( !_onDisk || values.size() == 0 )
- return;
+ // reduce and reemit into c++
+ switchMode(false);
+ _scope->invoke(_reduceAndEmit, 0, 0, 0, true);
+ // need to get the real number emitted so far
+ _numEmits = _scope->getNumberInt("_emitCt");
+ _config.reducer->numReduces = _scope->getNumberInt("_redCt");
+}
- BSONObj res = _config.reducer->finalReduce( values , _config.finalizer.get() );
- insert( _config.tempNamespace , res );
- }
+Collection* State::getCollectionOrUassert(Database* db, StringData ns) {
+ Collection* out = db ? db->getCollection(ns) : NULL;
+ uassert(18697, "Collection unexpectedly disappeared: " + ns.toString(), out);
+ return out;
+}
- BSONObj _nativeToTemp( const BSONObj& args, void* data ) {
- State* state = (State*) data;
- BSONObjIterator it(args);
- state->insert(state->_config.tempNamespace, it.next().Obj());
- return BSONObj();
- }
+/**
+ * Applies the last reduce and finalize on a list of tuples (key, val) and
+ * inserts the single result {_id: key, value: val} into the temp collection.
+ */
+void State::finalReduce(BSONList& values) {
+ if (!_onDisk || values.size() == 0)
+ return;
+
+ BSONObj res = _config.reducer->finalReduce(values, _config.finalizer.get());
+ insert(_config.tempNamespace, res);
+}
+
+BSONObj _nativeToTemp(const BSONObj& args, void* data) {
+ State* state = (State*)data;
+ BSONObjIterator it(args);
+ state->insert(state->_config.tempNamespace, it.next().Obj());
+ return BSONObj();
+}
// BSONObj _nativeToInc( const BSONObj& args, void* data ) {
// State* state = (State*) data;
@@ -956,807 +922,791 @@ namespace mongo {
// return BSONObj();
// }
- /**
- * Applies last reduce and finalize.
- * After calling this method, the temp collection will be completed.
- * If inline, the results will be in the in memory map
- */
- void State::finalReduce(CurOp * op , ProgressMeterHolder& pm ) {
-
- if (_jsMode) {
- // apply the reduce within JS
- if (_onDisk) {
- _scope->injectNative("_nativeToTemp", _nativeToTemp, this);
- _scope->invoke(_reduceAndFinalizeAndInsert, 0, 0, 0, true);
- return;
- }
- else {
- _scope->invoke(_reduceAndFinalize, 0, 0, 0, true);
- return;
- }
- }
+/**
+ * Applies the last reduce and finalize.
+ * After this method returns, the temp collection is complete.
+ * For inline output, the results are left in the in-memory map.
+ */
+void State::finalReduce(CurOp* op, ProgressMeterHolder& pm) {
+ if (_jsMode) {
+ // apply the reduce within JS
+ if (_onDisk) {
+ _scope->injectNative("_nativeToTemp", _nativeToTemp, this);
+ _scope->invoke(_reduceAndFinalizeAndInsert, 0, 0, 0, true);
+ return;
+ } else {
+ _scope->invoke(_reduceAndFinalize, 0, 0, 0, true);
+ return;
+ }
+ }
- if ( ! _onDisk ) {
- // all data has already been reduced, just finalize
- if ( _config.finalizer ) {
- long size = 0;
- for ( InMemory::iterator i=_temp->begin(); i!=_temp->end(); ++i ) {
- BSONObj key = i->first;
- BSONList& all = i->second;
+ if (!_onDisk) {
+ // all data has already been reduced, just finalize
+ if (_config.finalizer) {
+ long size = 0;
+ for (InMemory::iterator i = _temp->begin(); i != _temp->end(); ++i) {
+ BSONObj key = i->first;
+ BSONList& all = i->second;
- verify( all.size() == 1 );
+ verify(all.size() == 1);
- BSONObj res = _config.finalizer->finalize( all[0] );
+ BSONObj res = _config.finalizer->finalize(all[0]);
- all.clear();
- all.push_back( res );
- size += res.objsize();
- }
- _size = size;
- }
- return;
+ all.clear();
+ all.push_back(res);
+ size += res.objsize();
}
+ _size = size;
+ }
+ return;
+ }
- // use index on "0" to pull sorted data
- verify( _temp->size() == 0 );
- BSONObj sortKey = BSON( "0" << 1 );
+ // use index on "0" to pull sorted data
+ verify(_temp->size() == 0);
+ BSONObj sortKey = BSON("0" << 1);
+
+ {
+ OldClientWriteContext incCtx(_txn, _config.incLong);
+ WriteUnitOfWork wuow(_txn);
+ Collection* incColl = getCollectionOrUassert(incCtx.db(), _config.incLong);
+
+ bool foundIndex = false;
+ IndexCatalog::IndexIterator ii = incColl->getIndexCatalog()->getIndexIterator(_txn, true);
+ // Iterate over incColl's indexes.
+ while (ii.more()) {
+ IndexDescriptor* currIndex = ii.next();
+ BSONObj x = currIndex->infoObj();
+ if (sortKey.woCompare(x["key"].embeddedObject()) == 0) {
+ foundIndex = true;
+ break;
+ }
+ }
- {
- OldClientWriteContext incCtx(_txn, _config.incLong );
- WriteUnitOfWork wuow(_txn);
- Collection* incColl = getCollectionOrUassert(incCtx.db(), _config.incLong );
-
- bool foundIndex = false;
- IndexCatalog::IndexIterator ii =
- incColl->getIndexCatalog()->getIndexIterator( _txn, true );
- // Iterate over incColl's indexes.
- while ( ii.more() ) {
- IndexDescriptor* currIndex = ii.next();
- BSONObj x = currIndex->infoObj();
- if ( sortKey.woCompare( x["key"].embeddedObject() ) == 0 ) {
- foundIndex = true;
- break;
- }
- }
+ verify(foundIndex);
+ wuow.commit();
+ }
- verify( foundIndex );
- wuow.commit();
- }
+ unique_ptr<AutoGetCollectionForRead> ctx(new AutoGetCollectionForRead(_txn, _config.incLong));
- unique_ptr<AutoGetCollectionForRead> ctx(new AutoGetCollectionForRead(_txn, _config.incLong));
+ BSONObj prev;
+ BSONList all;
- BSONObj prev;
- BSONList all;
+ {
+ const auto count = _db.count(_config.incLong, BSONObj(), QueryOption_SlaveOk);
+ stdx::lock_guard<Client> lk(*_txn->getClient());
+ verify(pm ==
+ op->setMessage_inlock("m/r: (3/3) final reduce to collection",
+ "M/R: (3/3) Final Reduce Progress",
+ count));
+ }
- {
- const auto count = _db.count(_config.incLong, BSONObj(), QueryOption_SlaveOk);
- stdx::lock_guard<Client> lk(*_txn->getClient());
- verify(pm == op->setMessage_inlock("m/r: (3/3) final reduce to collection",
- "M/R: (3/3) Final Reduce Progress",
- count));
+ const NamespaceString nss(_config.incLong);
+ const WhereCallbackReal whereCallback(_txn, nss.db());
+
+ CanonicalQuery* cqRaw;
+ verify(CanonicalQuery::canonicalize(
+ _config.incLong, BSONObj(), sortKey, BSONObj(), &cqRaw, whereCallback).isOK());
+ std::unique_ptr<CanonicalQuery> cq(cqRaw);
+
+ Collection* coll = getCollectionOrUassert(ctx->getDb(), _config.incLong);
+ invariant(coll);
+
+ PlanExecutor* rawExec;
+ verify(getExecutor(_txn,
+ coll,
+ cq.release(),
+ PlanExecutor::YIELD_AUTO,
+ &rawExec,
+ QueryPlannerParams::NO_TABLE_SCAN).isOK());
+
+ unique_ptr<PlanExecutor> exec(rawExec);
+
+ // iterate over all sorted objects
+ BSONObj o;
+ PlanExecutor::ExecState state;
+ while (PlanExecutor::ADVANCED == (state = exec->getNext(&o, NULL))) {
+ o = o.getOwned(); // we will be accessing outside of the lock
+ pm.hit();
+
+ if (o.woSortOrder(prev, sortKey) == 0) {
+ // object is same as previous, add to array
+ all.push_back(o);
+ if (pm->hits() % 100 == 0) {
+ _txn->checkForInterrupt();
}
+ continue;
+ }
- const NamespaceString nss(_config.incLong);
- const WhereCallbackReal whereCallback(_txn, nss.db());
-
- CanonicalQuery* cqRaw;
- verify(CanonicalQuery::canonicalize(_config.incLong,
- BSONObj(),
- sortKey,
- BSONObj(),
- &cqRaw,
- whereCallback).isOK());
- std::unique_ptr<CanonicalQuery> cq(cqRaw);
-
- Collection* coll = getCollectionOrUassert(ctx->getDb(), _config.incLong);
- invariant(coll);
-
- PlanExecutor* rawExec;
- verify(getExecutor(_txn,
- coll,
- cq.release(),
- PlanExecutor::YIELD_AUTO,
- &rawExec,
- QueryPlannerParams::NO_TABLE_SCAN).isOK());
-
- unique_ptr<PlanExecutor> exec(rawExec);
-
- // iterate over all sorted objects
- BSONObj o;
- PlanExecutor::ExecState state;
- while (PlanExecutor::ADVANCED == (state = exec->getNext(&o, NULL))) {
- o = o.getOwned(); // we will be accessing outside of the lock
- pm.hit();
-
- if ( o.woSortOrder( prev , sortKey ) == 0 ) {
- // object is same as previous, add to array
- all.push_back( o );
- if ( pm->hits() % 100 == 0 ) {
- _txn->checkForInterrupt();
- }
- continue;
- }
+ exec->saveState();
- exec->saveState();
+ ctx.reset();
- ctx.reset();
+        // reduce and finalize array
+ finalReduce(all);
- // reduce a finalize array
- finalReduce( all );
+ ctx.reset(new AutoGetCollectionForRead(_txn, _config.incLong));
- ctx.reset(new AutoGetCollectionForRead(_txn, _config.incLong));
+ all.clear();
+ prev = o;
+ all.push_back(o);
- all.clear();
- prev = o;
- all.push_back( o );
+ if (!exec->restoreState(_txn)) {
+ break;
+ }
- if (!exec->restoreState(_txn)) {
- break;
- }
+ _txn->checkForInterrupt();
+ }
- _txn->checkForInterrupt();
- }
+ ctx.reset();
+ // reduce and finalize last array
+ finalReduce(all);
+ ctx.reset(new AutoGetCollectionForRead(_txn, _config.incLong));
- ctx.reset();
- // reduce and finalize last array
- finalReduce( all );
- ctx.reset(new AutoGetCollectionForRead(_txn, _config.incLong));
+ pm.finished();
+}
- pm.finished();
- }
+/**
+ * Attempts to reduce objects in the in-memory map.
+ * A new in-memory map is created to hold the results.
+ * If applicable, objects with a unique key may be dumped to the inc collection.
+ * Input and output objects are both {"0": key, "1": val}
+ */
+void State::reduceInMemory() {
+ if (_jsMode) {
+ // in js mode the reduce is applied when writing to collection
+ return;
+ }
- /**
- * Attempts to reduce objects in the memory map.
- * A new memory map will be created to hold the results.
- * If applicable, objects with unique key may be dumped to inc collection.
- * Input and output objects are both {"0": key, "1": val}
- */
- void State::reduceInMemory() {
-
- if (_jsMode) {
- // in js mode the reduce is applied when writing to collection
- return;
+ unique_ptr<InMemory> n(new InMemory()); // for new data
+ long nSize = 0;
+ _dupCount = 0;
+
+ for (InMemory::iterator i = _temp->begin(); i != _temp->end(); ++i) {
+ BSONList& all = i->second;
+
+ if (all.size() == 1) {
+ // only 1 value for this key
+ if (_onDisk) {
+ // this key has low cardinality, so just write to collection
+ _insertToInc(*(all.begin()));
+ } else {
+ // add to new map
+ nSize += _add(n.get(), all[0]);
}
+ } else if (all.size() > 1) {
+ // several values, reduce and add to map
+ BSONObj res = _config.reducer->reduce(all);
+ nSize += _add(n.get(), res);
+ }
+ }
- unique_ptr<InMemory> n( new InMemory() ); // for new data
- long nSize = 0;
- _dupCount = 0;
+ // swap maps
+ _temp.reset(n.release());
+ _size = nSize;
+}
- for ( InMemory::iterator i=_temp->begin(); i!=_temp->end(); ++i ) {
- BSONList& all = i->second;
+/**
+ * Dumps the entire in-memory map to the inc collection.
+ */
+void State::dumpToInc() {
+ if (!_onDisk)
+ return;
- if ( all.size() == 1 ) {
- // only 1 value for this key
- if ( _onDisk ) {
- // this key has low cardinality, so just write to collection
- _insertToInc( *(all.begin()) );
- }
- else {
- // add to new map
- nSize += _add(n.get(), all[0]);
- }
- }
- else if ( all.size() > 1 ) {
- // several values, reduce and add to map
- BSONObj res = _config.reducer->reduce( all );
- nSize += _add(n.get(), res);
- }
- }
+ for (InMemory::iterator i = _temp->begin(); i != _temp->end(); i++) {
+ BSONList& all = i->second;
+ if (all.size() < 1)
+ continue;
- // swap maps
- _temp.reset( n.release() );
- _size = nSize;
- }
+ for (BSONList::iterator j = all.begin(); j != all.end(); j++)
+ _insertToInc(*j);
+ }
+ _temp->clear();
+ _size = 0;
+}
- /**
- * Dumps the entire in memory map to the inc collection.
- */
- void State::dumpToInc() {
- if ( ! _onDisk )
- return;
+/**
+ * Adds an object to the in-memory map.
+ */
+void State::emit(const BSONObj& a) {
+ _numEmits++;
+ _size += _add(_temp.get(), a);
+}
- for ( InMemory::iterator i=_temp->begin(); i!=_temp->end(); i++ ) {
- BSONList& all = i->second;
- if ( all.size() < 1 )
- continue;
+int State::_add(InMemory* im, const BSONObj& a) {
+ BSONList& all = (*im)[a];
+ all.push_back(a);
+ if (all.size() > 1) {
+ ++_dupCount;
+ }
- for ( BSONList::iterator j=all.begin(); j!=all.end(); j++ )
- _insertToInc( *j );
- }
- _temp->clear();
- _size = 0;
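+    // objsize plus a small fixed allowance for per-entry map overhead (rough estimate)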
+ return a.objsize() + 16;
+}
+
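+/**
+ * Reduces the in-memory map when duplicates accumulate and, if memory use is
+ * still too high, dumps it to the inc collection. Must be called with no DB
+ * locks held; it manages its own locking and write units of work.
+ */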
+void State::reduceAndSpillInMemoryStateIfNeeded() {
+ // Make sure no DB locks are held, because this method manages its own locking and
+ // write units of work.
+ invariant(!_txn->lockState()->isLocked());
+
+ if (_jsMode) {
+ // try to reduce if it is beneficial
+ int dupCt = _scope->getNumberInt("_dupCt");
+ int keyCt = _scope->getNumberInt("_keyCt");
+
+ if (keyCt > _config.jsMaxKeys) {
+ // too many keys for JS, switch to mixed
+ _bailFromJS(BSONObj(), this);
+ // then fall through to check map size
+ } else if (dupCt > (keyCt * _config.reduceTriggerRatio)) {
+ // reduce now to lower mem usage
+ Timer t;
+ _scope->invoke(_reduceAll, 0, 0, 0, true);
+ LOG(3) << " MR - did reduceAll: keys=" << keyCt << " dups=" << dupCt
+ << " newKeys=" << _scope->getNumberInt("_keyCt") << " time=" << t.millis()
+ << "ms" << endl;
+ return;
}
+ }
- /**
- * Adds object to in memory map
- */
- void State::emit( const BSONObj& a ) {
- _numEmits++;
- _size += _add(_temp.get(), a);
+ if (_jsMode)
+ return;
+
+ if (_size > _config.maxInMemSize || _dupCount > (_temp->size() * _config.reduceTriggerRatio)) {
+ // attempt to reduce in memory map, if memory is too high or we have many duplicates
+ long oldSize = _size;
+ Timer t;
+ reduceInMemory();
+ LOG(3) << " MR - did reduceInMemory: size=" << oldSize << " dups=" << _dupCount
+ << " newSize=" << _size << " time=" << t.millis() << "ms" << endl;
+
+ // if size is still high, or values are not reducing well, dump
+ if (_onDisk && (_size > _config.maxInMemSize || _size > oldSize / 2)) {
+ dumpToInc();
+ LOG(3) << " MR - dumping to db" << endl;
}
+ }
+}
- int State::_add(InMemory* im, const BSONObj& a) {
- BSONList& all = (*im)[a];
- all.push_back( a );
- if (all.size() > 1) {
- ++_dupCount;
- }
+/**
+ * The emit implementation invoked from the JS map function.
+ */
+BSONObj fast_emit(const BSONObj& args, void* data) {
+ uassert(10077, "fast_emit takes 2 args", args.nFields() == 2);
+ uassert(13069,
+ "an emit can't be more than half max bson size",
+ args.objsize() < (BSONObjMaxUserSize / 2));
+
+ State* state = (State*)data;
+ if (args.firstElement().type() == Undefined) {
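+        // Normalize a JS 'undefined' key to null before storing the pair.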
+ BSONObjBuilder b(args.objsize());
+ b.appendNull("");
+ BSONObjIterator i(args);
+ i.next();
+ b.append(i.next());
+ state->emit(b.obj());
+ } else {
+ state->emit(args);
+ }
+ return BSONObj();
+}
- return a.objsize() + 16;
- }
+/**
+ * Called when we realize we can't use JS mode for m/r on the 1st key.
+ */
+BSONObj _bailFromJS(const BSONObj& args, void* data) {
+ State* state = (State*)data;
+ state->bailFromJS();
- void State::reduceAndSpillInMemoryStateIfNeeded() {
- // Make sure no DB locks are held, because this method manages its own locking and
- // write units of work.
- invariant(!_txn->lockState()->isLocked());
+ // emit this particular key if there is one
+ if (!args.isEmpty()) {
+ fast_emit(args, data);
+ }
+ return BSONObj();
+}
- if (_jsMode) {
- // try to reduce if it is beneficial
- int dupCt = _scope->getNumberInt("_dupCt");
- int keyCt = _scope->getNumberInt("_keyCt");
+/**
+ * This class represents a map/reduce command executed on a single server
+ */
+class MapReduceCommand : public Command {
+public:
+ MapReduceCommand() : Command("mapReduce", false, "mapreduce") {}
- if (keyCt > _config.jsMaxKeys) {
- // too many keys for JS, switch to mixed
- _bailFromJS(BSONObj(), this);
- // then fall through to check map size
- }
- else if (dupCt > (keyCt * _config.reduceTriggerRatio)) {
- // reduce now to lower mem usage
- Timer t;
- _scope->invoke(_reduceAll, 0, 0, 0, true);
- LOG(3) << " MR - did reduceAll: keys=" << keyCt << " dups=" << dupCt
- << " newKeys=" << _scope->getNumberInt("_keyCt") << " time="
- << t.millis() << "ms" << endl;
- return;
- }
- }
+ virtual bool slaveOk() const {
+ return repl::getGlobalReplicationCoordinator()->getReplicationMode() !=
+ repl::ReplicationCoordinator::modeReplSet;
+ }
- if (_jsMode)
- return;
-
- if (_size > _config.maxInMemSize || _dupCount > (_temp->size() * _config.reduceTriggerRatio)) {
- // attempt to reduce in memory map, if memory is too high or we have many duplicates
- long oldSize = _size;
- Timer t;
- reduceInMemory();
- LOG(3) << " MR - did reduceInMemory: size=" << oldSize << " dups=" << _dupCount
- << " newSize=" << _size << " time=" << t.millis() << "ms" << endl;
-
- // if size is still high, or values are not reducing well, dump
- if ( _onDisk && (_size > _config.maxInMemSize || _size > oldSize / 2) ) {
- dumpToInc();
- LOG(3) << " MR - dumping to db" << endl;
- }
- }
- }
+ virtual bool slaveOverrideOk() const {
+ return true;
+ }
- /**
- * emit that will be called by js function
- */
- BSONObj fast_emit( const BSONObj& args, void* data ) {
- uassert( 10077 , "fast_emit takes 2 args" , args.nFields() == 2 );
- uassert( 13069 , "an emit can't be more than half max bson size" , args.objsize() < ( BSONObjMaxUserSize / 2 ) );
-
- State* state = (State*) data;
- if ( args.firstElement().type() == Undefined ) {
- BSONObjBuilder b( args.objsize() );
- b.appendNull( "" );
- BSONObjIterator i( args );
- i.next();
- b.append( i.next() );
- state->emit( b.obj() );
- }
- else {
- state->emit( args );
- }
- return BSONObj();
- }
+ virtual void help(stringstream& help) const {
+ help << "Run a map/reduce operation on the server.\n";
+ help << "Note this is used for aggregation, not querying, in MongoDB.\n";
+ help << "http://dochub.mongodb.org/core/mapreduce";
+ }
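+    // Illustrative shell invocation (collection and function names are examples):
+    //   db.runCommand({mapreduce: "events", map: m, reduce: r, out: {merge: "totals"}})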
- /**
- * function is called when we realize we cant use js mode for m/r on the 1st key
- */
- BSONObj _bailFromJS( const BSONObj& args, void* data ) {
- State* state = (State*) data;
- state->bailFromJS();
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
- // emit this particular key if there is one
- if (!args.isEmpty()) {
- fast_emit(args, data);
- }
- return BSONObj();
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ addPrivilegesRequiredForMapReduce(this, dbname, cmdObj, out);
+ }
+
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmd,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ Timer t;
+
+ boost::optional<DisableDocumentValidation> maybeDisableValidation;
+ if (shouldBypassDocumentValidationForCommand(cmd))
+ maybeDisableValidation.emplace(txn);
+
+ auto client = txn->getClient();
+
+ if (client->isInDirectClient()) {
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::IllegalOperation, "Cannot run mapReduce command from eval()"));
}
- /**
- * This class represents a map/reduce command executed on a single server
- */
- class MapReduceCommand : public Command {
- public:
- MapReduceCommand() : Command("mapReduce", false, "mapreduce") {}
+ CurOp* op = CurOp::get(txn);
- virtual bool slaveOk() const {
- return repl::getGlobalReplicationCoordinator()->getReplicationMode() !=
- repl::ReplicationCoordinator::modeReplSet;
- }
+ Config config(dbname, cmd);
- virtual bool slaveOverrideOk() const { return true; }
+ LOG(1) << "mr ns: " << config.ns << endl;
- virtual void help( stringstream &help ) const {
- help << "Run a map/reduce operation on the server.\n";
- help << "Note this is used for aggregation, not querying, in MongoDB.\n";
- help << "http://dochub.mongodb.org/core/mapreduce";
- }
+ uassert(16149, "cannot run map reduce without the js engine", globalScriptEngine);
+
+ CollectionMetadataPtr collMetadata;
- virtual bool isWriteCommandForConfigServer() const { return false; }
+ // Prevent sharding state from changing during the MR.
+ unique_ptr<RangePreserver> rangePreserver;
+ {
+ AutoGetCollectionForRead ctx(txn, config.ns);
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- addPrivilegesRequiredForMapReduce(this, dbname, cmdObj, out);
+ Collection* collection = ctx.getCollection();
+ if (collection) {
+ rangePreserver.reset(new RangePreserver(collection));
}
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmd,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- Timer t;
+ // Get metadata before we check our version, to make sure it doesn't increment
+ // in the meantime. Need to do this in the same lock scope as the block.
+ if (shardingState.needCollectionMetadata(client, config.ns)) {
+ collMetadata = shardingState.getCollectionMetadata(config.ns);
+ }
+ }
- boost::optional<DisableDocumentValidation> maybeDisableValidation;
- if (shouldBypassDocumentValidationForCommand(cmd))
- maybeDisableValidation.emplace(txn);
+ bool shouldHaveData = false;
- auto client = txn->getClient();
+ BSONObjBuilder countsBuilder;
+ BSONObjBuilder timingBuilder;
+ State state(txn, config);
+ if (!state.sourceExists()) {
+ errmsg = "ns doesn't exist";
+ return false;
+ }
+ if (state.isOnDisk()) {
+            // This means a write operation will be performed, so make sure we are master.
+            // Ideally this check would live in slaveOk(), but the config is not known there.
+ NamespaceString nss(config.ns);
+ if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(nss)) {
+ errmsg = "not master";
+ return false;
+ }
+ }
- if (client->isInDirectClient()) {
- return appendCommandStatus(result,
- Status(ErrorCodes::IllegalOperation,
- "Cannot run mapReduce command from eval()"));
- }
+ try {
+ state.init();
+ state.prepTempCollection();
+ ON_BLOCK_EXIT_OBJ(state, &State::dropTempCollections);
+
+ int progressTotal = 0;
+ bool showTotal = true;
+ if (state.config().filter.isEmpty()) {
+ progressTotal = state.incomingDocuments();
+ } else {
+ showTotal = false;
+ // Set an arbitrary total > 0 so the meter will be activated.
+ progressTotal = 1;
+ }
- CurOp* op = CurOp::get(txn);
+ stdx::unique_lock<Client> lk(*txn->getClient());
+ ProgressMeter& progress(op->setMessage_inlock(
+ "m/r: (1/3) emit phase", "M/R: (1/3) Emit Progress", progressTotal));
+ lk.unlock();
+ progress.showTotal(showTotal);
+ ProgressMeterHolder pm(progress);
- Config config( dbname , cmd );
+            // config.limit is cast to a 32-bit unsigned elsewhere (see incomingDocuments()),
+            // so keep it small.
+ wassert(config.limit < 0x4000000);
- LOG(1) << "mr ns: " << config.ns << endl;
+ long long mapTime = 0;
+ long long reduceTime = 0;
+ long long numInputs = 0;
- uassert( 16149 , "cannot run map reduce without the js engine", globalScriptEngine );
+ {
+                // The range preserver above keeps chunk migrations off; now re-establish
+                // the cursor we will actually use.
- CollectionMetadataPtr collMetadata;
+ const NamespaceString nss(config.ns);
- // Prevent sharding state from changing during the MR.
- unique_ptr<RangePreserver> rangePreserver;
- {
- AutoGetCollectionForRead ctx(txn, config.ns);
+ // Need lock and context to use it
+ unique_ptr<ScopedTransaction> scopedXact(new ScopedTransaction(txn, MODE_IS));
+ unique_ptr<AutoGetDb> scopedAutoDb(new AutoGetDb(txn, nss.db(), MODE_S));
- Collection* collection = ctx.getCollection();
- if (collection) {
- rangePreserver.reset(new RangePreserver(collection));
- }
+ const WhereCallbackReal whereCallback(txn, nss.db());
- // Get metadata before we check our version, to make sure it doesn't increment
- // in the meantime. Need to do this in the same lock scope as the block.
- if (shardingState.needCollectionMetadata(client, config.ns)) {
- collMetadata = shardingState.getCollectionMetadata( config.ns );
- }
+ CanonicalQuery* cqRaw;
+ if (!CanonicalQuery::canonicalize(
+ config.ns, config.filter, config.sort, BSONObj(), &cqRaw, whereCallback)
+ .isOK()) {
+ uasserted(17238, "Can't canonicalize query " + config.filter.toString());
+ return 0;
}
+ std::unique_ptr<CanonicalQuery> cq(cqRaw);
- bool shouldHaveData = false;
+ Database* db = scopedAutoDb->getDb();
+ Collection* coll = state.getCollectionOrUassert(db, config.ns);
+ invariant(coll);
- BSONObjBuilder countsBuilder;
- BSONObjBuilder timingBuilder;
- State state( txn, config );
- if ( ! state.sourceExists() ) {
- errmsg = "ns doesn't exist";
- return false;
- }
- if (state.isOnDisk()) {
- // this means that it will be doing a write operation, make sure we are on Master
- // ideally this check should be in slaveOk(), but at that point config is not known
- NamespaceString nss(config.ns);
- if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(nss)) {
- errmsg = "not master";
- return false;
- }
+ PlanExecutor* rawExec;
+ if (!getExecutor(txn, coll, cq.release(), PlanExecutor::YIELD_AUTO, &rawExec)
+ .isOK()) {
+ uasserted(17239, "Can't get executor for query " + config.filter.toString());
+ return 0;
}
- try {
- state.init();
- state.prepTempCollection();
- ON_BLOCK_EXIT_OBJ(state, &State::dropTempCollections);
+ unique_ptr<PlanExecutor> exec(rawExec);
- int progressTotal = 0;
- bool showTotal = true;
- if ( state.config().filter.isEmpty() ) {
- progressTotal = state.incomingDocuments();
- }
- else {
- showTotal = false;
- // Set an arbitrary total > 0 so the meter will be activated.
- progressTotal = 1;
- }
+ Timer mt;
- stdx::unique_lock<Client> lk(*txn->getClient());
- ProgressMeter& progress( op->setMessage_inlock("m/r: (1/3) emit phase",
- "M/R: (1/3) Emit Progress",
- progressTotal ));
- lk.unlock();
- progress.showTotal(showTotal);
- ProgressMeterHolder pm(progress);
-
- // See cast on next line to 32 bit unsigned
- wassert(config.limit < 0x4000000);
-
- long long mapTime = 0;
- long long reduceTime = 0;
- long long numInputs = 0;
-
- {
- // We've got a cursor preventing migrations off, now re-establish our
- // useful cursor.
-
- const NamespaceString nss(config.ns);
-
- // Need lock and context to use it
- unique_ptr<ScopedTransaction> scopedXact(
- new ScopedTransaction(txn, MODE_IS));
- unique_ptr<AutoGetDb> scopedAutoDb(new AutoGetDb(txn, nss.db(), MODE_S));
-
- const WhereCallbackReal whereCallback(txn, nss.db());
-
- CanonicalQuery* cqRaw;
- if (!CanonicalQuery::canonicalize(config.ns,
- config.filter,
- config.sort,
- BSONObj(),
- &cqRaw,
- whereCallback).isOK()) {
- uasserted(17238, "Can't canonicalize query " + config.filter.toString());
- return 0;
- }
- std::unique_ptr<CanonicalQuery> cq(cqRaw);
-
- Database* db = scopedAutoDb->getDb();
- Collection* coll = state.getCollectionOrUassert(db, config.ns);
- invariant(coll);
-
- PlanExecutor* rawExec;
- if (!getExecutor(txn,
- coll,
- cq.release(),
- PlanExecutor::YIELD_AUTO,
- &rawExec).isOK()) {
- uasserted(17239, "Can't get executor for query "
- + config.filter.toString());
- return 0;
+ // go through each doc
+ BSONObj o;
+ while (PlanExecutor::ADVANCED == exec->getNext(&o, NULL)) {
+ // check to see if this is a new object we don't own yet
+ // because of a chunk migration
+ if (collMetadata) {
+ ShardKeyPattern kp(collMetadata->getKeyPattern());
+ if (!collMetadata->keyBelongsToMe(kp.extractShardKeyFromDoc(o))) {
+ continue;
}
+ }
- unique_ptr<PlanExecutor> exec(rawExec);
-
- Timer mt;
-
- // go through each doc
- BSONObj o;
- while (PlanExecutor::ADVANCED == exec->getNext(&o, NULL)) {
- // check to see if this is a new object we don't own yet
- // because of a chunk migration
- if ( collMetadata ) {
- ShardKeyPattern kp( collMetadata->getKeyPattern() );
- if (!collMetadata->keyBelongsToMe(kp.extractShardKeyFromDoc(o))) {
- continue;
- }
- }
-
- // do map
- if ( config.verbose ) mt.reset();
- config.mapper->map( o );
- if ( config.verbose ) mapTime += mt.micros();
-
- // Check if the state accumulated so far needs to be written to a
- // collection. This may yield the DB lock temporarily and then
- // acquire it again.
- //
- numInputs++;
- if (numInputs % 100 == 0) {
- Timer t;
-
- // TODO: As an optimization, we might want to do the save/restore
- // state and yield inside the reduceAndSpillInMemoryState method, so
- // it only happens if necessary.
- exec->saveState();
-
- scopedAutoDb.reset();
- scopedXact.reset();
-
- state.reduceAndSpillInMemoryStateIfNeeded();
-
- scopedXact.reset(new ScopedTransaction(txn, MODE_IS));
- scopedAutoDb.reset(new AutoGetDb(txn, nss.db(), MODE_S));
-
- exec->restoreState(txn);
-
- // Need to reload the database, in case it was dropped after we
- // released the lock
- db = scopedAutoDb->getDb();
- if (db == NULL) {
- // Database was deleted after we freed the lock
- StringBuilder sb;
- sb << "Database "
- << nss.db()
- << " was deleted in the middle of the reduce job.";
- uasserted(28523, sb.str());
- }
-
- reduceTime += t.micros();
-
- txn->checkForInterrupt();
- }
-
- pm.hit();
-
- if (config.limit && numInputs >= config.limit)
- break;
+ // do map
+ if (config.verbose)
+ mt.reset();
+ config.mapper->map(o);
+ if (config.verbose)
+ mapTime += mt.micros();
+
+ // Check if the state accumulated so far needs to be written to a
+ // collection. This may yield the DB lock temporarily and then
+ // acquire it again.
+ //
+ numInputs++;
+ if (numInputs % 100 == 0) {
+ Timer t;
+
+ // TODO: As an optimization, we might want to do the save/restore
+ // state and yield inside the reduceAndSpillInMemoryState method, so
+ // it only happens if necessary.
+ exec->saveState();
+
+ scopedAutoDb.reset();
+ scopedXact.reset();
+
+ state.reduceAndSpillInMemoryStateIfNeeded();
+
+ scopedXact.reset(new ScopedTransaction(txn, MODE_IS));
+ scopedAutoDb.reset(new AutoGetDb(txn, nss.db(), MODE_S));
+
+ exec->restoreState(txn);
+
+ // Need to reload the database, in case it was dropped after we
+ // released the lock
+ db = scopedAutoDb->getDb();
+ if (db == NULL) {
+ // Database was deleted after we freed the lock
+ StringBuilder sb;
+ sb << "Database " << nss.db()
+ << " was deleted in the middle of the reduce job.";
+ uasserted(28523, sb.str());
}
- }
- pm.finished();
- txn->checkForInterrupt();
+ reduceTime += t.micros();
- // update counters
- countsBuilder.appendNumber("input", numInputs);
- countsBuilder.appendNumber( "emit" , state.numEmits() );
- if ( state.numEmits() )
- shouldHaveData = true;
+ txn->checkForInterrupt();
+ }
- timingBuilder.appendNumber( "mapTime" , mapTime / 1000 );
- timingBuilder.append( "emitLoop" , t.millis() );
+ pm.hit();
- {
- stdx::lock_guard<Client> lk(*txn->getClient());
- op->setMessage_inlock("m/r: (2/3) final reduce in memory",
- "M/R: (2/3) Final In-Memory Reduce Progress");
- }
- Timer rt;
- // do reduce in memory
- // this will be the last reduce needed for inline mode
- state.reduceInMemory();
- // if not inline: dump the in memory map to inc collection, all data is on disk
- state.dumpToInc();
- // final reduce
- state.finalReduce(op , pm );
- reduceTime += rt.micros();
- countsBuilder.appendNumber( "reduce" , state.numReduces() );
- timingBuilder.appendNumber("reduceTime", reduceTime / 1000);
- timingBuilder.append( "mode" , state.jsMode() ? "js" : "mixed" );
-
- long long finalCount = state.postProcessCollection(txn, op, pm);
- state.appendResults( result );
-
- timingBuilder.appendNumber( "total" , t.millis() );
- result.appendNumber( "timeMillis" , t.millis() );
- countsBuilder.appendNumber( "output" , finalCount );
- if ( config.verbose ) result.append( "timing" , timingBuilder.obj() );
- result.append( "counts" , countsBuilder.obj() );
-
- if ( finalCount == 0 && shouldHaveData ) {
- result.append( "cmd" , cmd );
- errmsg = "there were emits but no data!";
- return false;
- }
- }
- catch( SendStaleConfigException& e ){
- log() << "mr detected stale config, should retry" << causedBy(e) << endl;
- throw e;
- }
- // TODO: The error handling code for queries is v. fragile,
- // *requires* rethrow AssertionExceptions - should probably fix.
- catch ( AssertionException& e ){
- log() << "mr failed, removing collection" << causedBy(e) << endl;
- throw e;
- }
- catch ( std::exception& e ){
- log() << "mr failed, removing collection" << causedBy(e) << endl;
- throw e;
- }
- catch ( ... ) {
- log() << "mr failed for unknown reason, removing collection" << endl;
- throw;
+ if (config.limit && numInputs >= config.limit)
+ break;
}
-
- return true;
}
+ pm.finished();
+
+ txn->checkForInterrupt();
+
+ // update counters
+ countsBuilder.appendNumber("input", numInputs);
+ countsBuilder.appendNumber("emit", state.numEmits());
+ if (state.numEmits())
+ shouldHaveData = true;
- } mapReduceCommand;
-
- /**
- * This class represents a map/reduce command executed on the output server of a sharded env
- */
- class MapReduceFinishCommand : public Command {
- public:
- void help(stringstream& h) const { h << "internal"; }
- MapReduceFinishCommand() : Command( "mapreduce.shardedfinish" ) {}
- virtual bool slaveOk() const {
- return repl::getGlobalReplicationCoordinator()->getReplicationMode() !=
- repl::ReplicationCoordinator::modeReplSet;
+ timingBuilder.appendNumber("mapTime", mapTime / 1000);
+ timingBuilder.append("emitLoop", t.millis());
+
+ {
+ stdx::lock_guard<Client> lk(*txn->getClient());
+ op->setMessage_inlock("m/r: (2/3) final reduce in memory",
+ "M/R: (2/3) Final In-Memory Reduce Progress");
}
- virtual bool slaveOverrideOk() const { return true; }
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::internal);
- out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
+ Timer rt;
+ // do reduce in memory
+ // this will be the last reduce needed for inline mode
+ state.reduceInMemory();
+ // if not inline: dump the in memory map to inc collection, all data is on disk
+ state.dumpToInc();
+ // final reduce
+ state.finalReduce(op, pm);
+ reduceTime += rt.micros();
+ countsBuilder.appendNumber("reduce", state.numReduces());
+ timingBuilder.appendNumber("reduceTime", reduceTime / 1000);
+ timingBuilder.append("mode", state.jsMode() ? "js" : "mixed");
+
+ long long finalCount = state.postProcessCollection(txn, op, pm);
+ state.appendResults(result);
+
+ timingBuilder.appendNumber("total", t.millis());
+ result.appendNumber("timeMillis", t.millis());
+ countsBuilder.appendNumber("output", finalCount);
+ if (config.verbose)
+ result.append("timing", timingBuilder.obj());
+ result.append("counts", countsBuilder.obj());
+
+ if (finalCount == 0 && shouldHaveData) {
+ result.append("cmd", cmd);
+ errmsg = "there were emits but no data!";
+ return false;
}
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- boost::optional<DisableDocumentValidation> maybeDisableValidation;
- if (shouldBypassDocumentValidationForCommand(cmdObj))
- maybeDisableValidation.emplace(txn);
-
- ShardedConnectionInfo::addHook();
- // legacy name
- string shardedOutputCollection = cmdObj["shardedOutputCollection"].valuestrsafe();
- verify( shardedOutputCollection.size() > 0 );
- string inputNS;
- if ( cmdObj["inputDB"].type() == String ) {
- inputNS = cmdObj["inputDB"].String() + "." + shardedOutputCollection;
- }
- else {
- inputNS = dbname + "." + shardedOutputCollection;
- }
+ } catch (SendStaleConfigException& e) {
+ log() << "mr detected stale config, should retry" << causedBy(e) << endl;
+ throw e;
+ }
+        // TODO: The error handling code for queries is very fragile and *requires*
+        // rethrowing AssertionExceptions - should probably fix.
+ catch (AssertionException& e) {
+ log() << "mr failed, removing collection" << causedBy(e) << endl;
+ throw e;
+ } catch (std::exception& e) {
+ log() << "mr failed, removing collection" << causedBy(e) << endl;
+ throw e;
+ } catch (...) {
+ log() << "mr failed for unknown reason, removing collection" << endl;
+ throw;
+ }
- CurOp * op = CurOp::get(txn);
+ return true;
+ }
- Config config( dbname , cmdObj.firstElement().embeddedObjectUserCheck() );
- State state(txn, config);
- state.init();
+} mapReduceCommand;
- // no need for incremental collection because records are already sorted
- state._useIncremental = false;
- config.incLong = config.tempNamespace;
+/**
+ * This class represents a map/reduce command executed on the output server of a
+ * sharded environment; it merge-sorts the per-shard results and runs the final
+ * reduce.
+ */
+class MapReduceFinishCommand : public Command {
+public:
+ void help(stringstream& h) const {
+ h << "internal";
+ }
+ MapReduceFinishCommand() : Command("mapreduce.shardedfinish") {}
+ virtual bool slaveOk() const {
+ return repl::getGlobalReplicationCoordinator()->getReplicationMode() !=
+ repl::ReplicationCoordinator::modeReplSet;
+ }
+ virtual bool slaveOverrideOk() const {
+ return true;
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::internal);
+ out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
+ }
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ boost::optional<DisableDocumentValidation> maybeDisableValidation;
+ if (shouldBypassDocumentValidationForCommand(cmdObj))
+ maybeDisableValidation.emplace(txn);
+
+ ShardedConnectionInfo::addHook();
+ // legacy name
+ string shardedOutputCollection = cmdObj["shardedOutputCollection"].valuestrsafe();
+ verify(shardedOutputCollection.size() > 0);
+ string inputNS;
+ if (cmdObj["inputDB"].type() == String) {
+ inputNS = cmdObj["inputDB"].String() + "." + shardedOutputCollection;
+ } else {
+ inputNS = dbname + "." + shardedOutputCollection;
+ }
- BSONObj shardCounts = cmdObj["shardCounts"].embeddedObjectUserCheck();
- BSONObj counts = cmdObj["counts"].embeddedObjectUserCheck();
+ CurOp* op = CurOp::get(txn);
- stdx::unique_lock<Client> lk(*txn->getClient());
- ProgressMeterHolder pm(op->setMessage_inlock("m/r: merge sort and reduce",
- "M/R Merge Sort and Reduce Progress"));
- lk.unlock();
- set<string> servers;
+ Config config(dbname, cmdObj.firstElement().embeddedObjectUserCheck());
+ State state(txn, config);
+ state.init();
- {
- // parse per shard results
- BSONObjIterator i(shardCounts);
- while (i.more()) {
- BSONElement e = i.next();
- servers.insert(e.fieldName());
- }
- }
+ // no need for incremental collection because records are already sorted
+ state._useIncremental = false;
+ config.incLong = config.tempNamespace;
- state.prepTempCollection();
- ON_BLOCK_EXIT_OBJ(state, &State::dropTempCollections);
-
- BSONList values;
- if (!config.outputOptions.outDB.empty()) {
- BSONObjBuilder loc;
- if ( !config.outputOptions.outDB.empty())
- loc.append( "db" , config.outputOptions.outDB );
- if ( !config.outputOptions.collectionName.empty() )
- loc.append( "collection" , config.outputOptions.collectionName );
- result.append("result", loc.obj());
- }
- else {
- if ( !config.outputOptions.collectionName.empty() )
- result.append( "result" , config.outputOptions.collectionName );
- }
+ BSONObj shardCounts = cmdObj["shardCounts"].embeddedObjectUserCheck();
+ BSONObj counts = cmdObj["counts"].embeddedObjectUserCheck();
- auto status = grid.catalogCache()->getDatabase(dbname);
- if (!status.isOK()) {
- return appendCommandStatus(result, status.getStatus());
- }
+ stdx::unique_lock<Client> lk(*txn->getClient());
+ ProgressMeterHolder pm(op->setMessage_inlock("m/r: merge sort and reduce",
+ "M/R Merge Sort and Reduce Progress"));
+ lk.unlock();
+ set<string> servers;
+
+ {
+ // parse per shard results
+ BSONObjIterator i(shardCounts);
+ while (i.more()) {
+ BSONElement e = i.next();
+ servers.insert(e.fieldName());
+ }
+ }
- shared_ptr<DBConfig> confOut = status.getValue();
+ state.prepTempCollection();
+ ON_BLOCK_EXIT_OBJ(state, &State::dropTempCollections);
+
+ BSONList values;
+ if (!config.outputOptions.outDB.empty()) {
+ BSONObjBuilder loc;
+ if (!config.outputOptions.outDB.empty())
+ loc.append("db", config.outputOptions.outDB);
+ if (!config.outputOptions.collectionName.empty())
+ loc.append("collection", config.outputOptions.collectionName);
+ result.append("result", loc.obj());
+ } else {
+ if (!config.outputOptions.collectionName.empty())
+ result.append("result", config.outputOptions.collectionName);
+ }
- vector<ChunkPtr> chunks;
- if ( confOut->isSharded(config.outputOptions.finalNamespace) ) {
- ChunkManagerPtr cm = confOut->getChunkManager(
- config.outputOptions.finalNamespace);
+ auto status = grid.catalogCache()->getDatabase(dbname);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status.getStatus());
+ }
- // Fetch result from other shards 1 chunk at a time. It would be better to do
- // just one big $or query, but then the sorting would not be efficient.
- const string shardName = shardingState.getShardName();
- const ChunkMap& chunkMap = cm->getChunkMap();
+ shared_ptr<DBConfig> confOut = status.getValue();
- for ( ChunkMap::const_iterator it = chunkMap.begin(); it != chunkMap.end(); ++it ) {
- ChunkPtr chunk = it->second;
- if (chunk->getShardId() == shardName) {
- chunks.push_back(chunk);
- }
- }
- }
+ vector<ChunkPtr> chunks;
+ if (confOut->isSharded(config.outputOptions.finalNamespace)) {
+ ChunkManagerPtr cm = confOut->getChunkManager(config.outputOptions.finalNamespace);
- long long inputCount = 0;
- unsigned int index = 0;
- BSONObj query;
- BSONArrayBuilder chunkSizes;
- while (true) {
- ChunkPtr chunk;
- if (chunks.size() > 0) {
- chunk = chunks[index];
- BSONObjBuilder b;
- b.appendAs(chunk->getMin().firstElement(), "$gte");
- b.appendAs(chunk->getMax().firstElement(), "$lt");
- query = BSON("_id" << b.obj());
-// chunkSizes.append(min);
- }
+ // Fetch result from other shards 1 chunk at a time. It would be better to do
+ // just one big $or query, but then the sorting would not be efficient.
+ const string shardName = shardingState.getShardName();
+ const ChunkMap& chunkMap = cm->getChunkMap();
- // reduce from each shard for a chunk
- BSONObj sortKey = BSON( "_id" << 1 );
- ParallelSortClusteredCursor cursor(servers, inputNS,
- Query(query).sort(sortKey), QueryOption_NoCursorTimeout);
- cursor.init();
- int chunkSize = 0;
-
- while ( cursor.more() || !values.empty() ) {
- BSONObj t;
- if (cursor.more()) {
- t = cursor.next().getOwned();
- ++inputCount;
-
- if ( values.size() == 0 ) {
- values.push_back( t );
- continue;
- }
-
- if ( t.woSortOrder( *(values.begin()) , sortKey ) == 0 ) {
- values.push_back( t );
- continue;
- }
- }
+ for (ChunkMap::const_iterator it = chunkMap.begin(); it != chunkMap.end(); ++it) {
+ ChunkPtr chunk = it->second;
+ if (chunk->getShardId() == shardName) {
+ chunks.push_back(chunk);
+ }
+ }
+ }
- BSONObj res = config.reducer->finalReduce( values , config.finalizer.get());
- chunkSize += res.objsize();
- if (state.isOnDisk())
- state.insert( config.tempNamespace , res );
- else
- state.emit(res);
- values.clear();
- if (!t.isEmpty())
- values.push_back( t );
+ long long inputCount = 0;
+ unsigned int index = 0;
+ BSONObj query;
+ BSONArrayBuilder chunkSizes;
+ while (true) {
+ ChunkPtr chunk;
+ if (chunks.size() > 0) {
+ chunk = chunks[index];
+ BSONObjBuilder b;
+ b.appendAs(chunk->getMin().firstElement(), "$gte");
+ b.appendAs(chunk->getMax().firstElement(), "$lt");
+ query = BSON("_id" << b.obj());
+ // chunkSizes.append(min);
+ }
+
+ // reduce from each shard for a chunk
+ BSONObj sortKey = BSON("_id" << 1);
+ ParallelSortClusteredCursor cursor(
+ servers, inputNS, Query(query).sort(sortKey), QueryOption_NoCursorTimeout);
+ cursor.init();
+ int chunkSize = 0;
+
+ while (cursor.more() || !values.empty()) {
+ BSONObj t;
+ if (cursor.more()) {
+ t = cursor.next().getOwned();
+ ++inputCount;
+
+ if (values.size() == 0) {
+ values.push_back(t);
+ continue;
}
- if (chunk) {
- chunkSizes.append(chunk->getMin());
- chunkSizes.append(chunkSize);
+ if (t.woSortOrder(*(values.begin()), sortKey) == 0) {
+ values.push_back(t);
+ continue;
}
- if (++index >= chunks.size())
- break;
}
- // Forget temporary input collection, if output is sharded collection
- ShardConnection::forgetNS( inputNS );
+ BSONObj res = config.reducer->finalReduce(values, config.finalizer.get());
+ chunkSize += res.objsize();
+ if (state.isOnDisk())
+ state.insert(config.tempNamespace, res);
+ else
+ state.emit(res);
+ values.clear();
+ if (!t.isEmpty())
+ values.push_back(t);
+ }
- result.append( "chunkSizes" , chunkSizes.arr() );
+ if (chunk) {
+ chunkSizes.append(chunk->getMin());
+ chunkSizes.append(chunkSize);
+ }
+ if (++index >= chunks.size())
+ break;
+ }
- long long outputCount = state.postProcessCollection(txn, op, pm);
- state.appendResults( result );
+        // Forget the temporary input collection if the output is a sharded collection
+ ShardConnection::forgetNS(inputNS);
- BSONObjBuilder countsB(32);
- countsB.append("input", inputCount);
- countsB.append("reduce", state.numReduces());
- countsB.append("output", outputCount);
- result.append( "counts" , countsB.obj() );
+ result.append("chunkSizes", chunkSizes.arr());
- return 1;
- }
- } mapReduceFinishCommand;
+ long long outputCount = state.postProcessCollection(txn, op, pm);
+ state.appendResults(result);
- }
+ BSONObjBuilder countsB(32);
+ countsB.append("input", inputCount);
+ countsB.append("reduce", state.numReduces());
+ countsB.append("output", outputCount);
+ result.append("counts", countsB.obj());
+ return 1;
+ }
+} mapReduceFinishCommand;
+}
}
-
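
The merge pass above depends on each shard cursor returning documents sorted by _id: consecutive documents with an equal key accumulate in values, and the group is reduced as soon as the key changes or the cursors are exhausted. A minimal standalone sketch of that grouping logic, with a plain struct and a summing reducer standing in for BSONObj, woSortOrder(), and Reducer::finalReduce():

    #include <iostream>
    #include <string>
    #include <vector>

    struct Doc {
        std::string key;  // stands in for the _id field
        int value;
    };

    // Sample reducer: sums the group; the real code calls Reducer::finalReduce().
    Doc reduce(const std::vector<Doc>& group) {
        Doc out{group.front().key, 0};
        for (const Doc& d : group)
            out.value += d.value;
        return out;
    }

    int main() {
        // Input must already be sorted by key, mirroring Query(query).sort(BSON("_id" << 1)).
        std::vector<Doc> sorted = {{"a", 1}, {"a", 2}, {"b", 5}};
        std::vector<Doc> values;
        for (const Doc& d : sorted) {
            if (!values.empty() && values.front().key != d.key) {
                Doc r = reduce(values);  // key changed: emit one reduced document
                std::cout << r.key << " -> " << r.value << "\n";
                values.clear();
            }
            values.push_back(d);
        }
        if (!values.empty()) {
            Doc r = reduce(values);  // flush the final group
            std::cout << r.key << " -> " << r.value << "\n";
        }
        return 0;
    }
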
diff --git a/src/mongo/db/commands/mr.h b/src/mongo/db/commands/mr.h
index 083165ebe27..8bc4264794e 100644
--- a/src/mongo/db/commands/mr.h
+++ b/src/mongo/db/commands/mr.h
@@ -42,347 +42,374 @@
namespace mongo {
- class Collection;
- class Database;
- class OperationContext;
-
- namespace mr {
-
- typedef std::vector<BSONObj> BSONList;
-
- class State;
-
- // ------------ function interfaces -----------
-
- class Mapper {
- MONGO_DISALLOW_COPYING(Mapper);
- public:
- virtual ~Mapper() {}
- virtual void init( State * state ) = 0;
-
- virtual void map( const BSONObj& o ) = 0;
- protected:
- Mapper() = default;
- };
-
- class Finalizer {
- MONGO_DISALLOW_COPYING(Finalizer);
- public:
- virtual ~Finalizer() {}
- virtual void init( State * state ) = 0;
-
- /**
- * this takes a tuple and returns a tuple
- */
- virtual BSONObj finalize( const BSONObj& tuple ) = 0;
-
- protected:
- Finalizer() = default;
- };
-
- class Reducer {
- MONGO_DISALLOW_COPYING(Reducer);
- public:
- Reducer() : numReduces(0) {}
- virtual ~Reducer() {}
- virtual void init( State * state ) = 0;
-
- virtual BSONObj reduce( const BSONList& tuples ) = 0;
- /** this means its a final reduce, even if there is no finalizer */
- virtual BSONObj finalReduce( const BSONList& tuples , Finalizer * finalizer ) = 0;
-
- long long numReduces;
- };
-
- // ------------ js function implementations -----------
-
- /**
- * used as a holder for Scope and ScriptingFunction
- * visitor like pattern as Scope is gotten from first access
- */
- class JSFunction {
- MONGO_DISALLOW_COPYING(JSFunction);
- public:
- /**
- * @param type (map|reduce|finalize)
- */
- JSFunction( const std::string& type , const BSONElement& e );
- virtual ~JSFunction() {}
-
- virtual void init( State * state );
-
- Scope * scope() const { return _scope; }
- ScriptingFunction func() const { return _func; }
-
- private:
- std::string _type;
- std::string _code; // actual javascript code
- BSONObj _wantedScope; // this is for CodeWScope
-
- Scope * _scope; // this is not owned by us, and might be shared
- ScriptingFunction _func;
- };
-
- class JSMapper : public Mapper {
- public:
- JSMapper( const BSONElement & code ) : _func( "_map" , code ) {}
- virtual void map( const BSONObj& o );
- virtual void init( State * state );
-
- private:
- JSFunction _func;
- BSONObj _params;
- };
-
- class JSReducer : public Reducer {
- public:
- JSReducer( const BSONElement& code ) : _func( "_reduce" , code ) {}
- virtual void init( State * state );
-
- virtual BSONObj reduce( const BSONList& tuples );
- virtual BSONObj finalReduce( const BSONList& tuples , Finalizer * finalizer );
-
- private:
-
- /**
- * result in "__returnValue"
- * @param key OUT
- * @param endSizeEstimate OUT
- */
- void _reduce( const BSONList& values , BSONObj& key , int& endSizeEstimate );
-
- JSFunction _func;
- };
-
- class JSFinalizer : public Finalizer {
- public:
- JSFinalizer( const BSONElement& code ) : _func( "_finalize" , code ) {}
- virtual BSONObj finalize( const BSONObj& o );
- virtual void init( State * state ) { _func.init( state ); }
- private:
- JSFunction _func;
-
- };
-
- // -----------------
-
-
- class TupleKeyCmp {
- public:
- TupleKeyCmp() {}
- bool operator()( const BSONObj &l, const BSONObj &r ) const {
- return l.firstElement().woCompare( r.firstElement() ) < 0;
- }
- };
-
- typedef std::map< BSONObj,BSONList,TupleKeyCmp > InMemory; // from key to list of tuples
-
- /**
- * holds map/reduce config information
- */
- class Config {
- public:
- Config( const std::string& _dbname , const BSONObj& cmdObj );
-
- std::string dbname;
- std::string ns;
-
- // options
- bool verbose;
- bool jsMode;
- int splitInfo;
-
- // query options
-
- BSONObj filter;
- BSONObj sort;
- long long limit;
-
- // functions
-
- std::unique_ptr<Mapper> mapper;
- std::unique_ptr<Reducer> reducer;
- std::unique_ptr<Finalizer> finalizer;
-
- BSONObj mapParams;
- BSONObj scopeSetup;
-
- // output tables
- std::string incLong;
- std::string tempNamespace;
-
- enum OutputType {
- REPLACE , // atomically replace the collection
- MERGE , // merge keys, override dups
- REDUCE , // merge keys, reduce dups
- INMEMORY // only store in memory, limited in size
- };
- struct OutputOptions {
- std::string outDB;
- std::string collectionName;
- std::string finalNamespace;
- // if true, no lock during output operation
- bool outNonAtomic;
- OutputType outType;
- } outputOptions;
-
- static OutputOptions parseOutputOptions(const std::string& dbname, const BSONObj& cmdObj);
-
- // max number of keys allowed in JS map before switching mode
- long jsMaxKeys;
- // ratio of duplicates vs unique keys before reduce is triggered in js mode
- float reduceTriggerRatio;
- // maximum size of map before it gets dumped to disk
- long maxInMemSize;
-
- // true when called from mongos to do phase-1 of M/R
- bool shardedFirstPass;
-
- static AtomicUInt32 JOB_NUMBER;
- }; // end MRsetup
-
- /**
- * stores information about intermediate map reduce state
- * controls flow of data from map->reduce->finalize->output
- */
- class State {
- public:
- /**
- * txn must outlive this State.
- */
- State( OperationContext* txn, const Config& c );
- ~State();
-
- void init();
-
- // ---- prep -----
- bool sourceExists();
-
- long long incomingDocuments();
-
- // ---- map stage ----
+class Collection;
+class Database;
+class OperationContext;
- /**
- * stages on in in-memory storage
- */
- void emit( const BSONObj& a );
-
- /**
- * Checks the size of the transient in-memory results accumulated so far and potentially
- * runs reduce in order to compact them. If the data is still too large, it will be
- * spilled to the output collection.
- *
- * NOTE: Make sure that no DB locks are held, when calling this function, because it may
- * try to acquire write DB lock for the write to the output collection.
- */
- void reduceAndSpillInMemoryStateIfNeeded();
-
- /**
- * run reduce on _temp
- */
- void reduceInMemory();
-
- /**
- * transfers in memory storage to temp collection
- */
- void dumpToInc();
- void insertToInc( BSONObj& o );
- void _insertToInc( BSONObj& o );
-
- // ------ reduce stage -----------
-
- void prepTempCollection();
-
- void finalReduce( BSONList& values );
-
- void finalReduce( CurOp * op , ProgressMeterHolder& pm );
-
- // ------- cleanup/data positioning ----------
-
- /**
- * Clean up the temporary and incremental collections
- */
- void dropTempCollections();
-
- /**
- @return number objects in collection
- */
- long long postProcessCollection(
- OperationContext* txn, CurOp* op, ProgressMeterHolder& pm);
- long long postProcessCollectionNonAtomic(
- OperationContext* txn, CurOp* op, ProgressMeterHolder& pm);
-
- /**
- * if INMEMORY will append
- * may also append stats or anything else it likes
- */
- void appendResults( BSONObjBuilder& b );
-
- // -------- util ------------
-
- /**
- * inserts with correct replication semantics
- */
- void insert( const std::string& ns , const BSONObj& o );
+namespace mr {
- // ------ simple accessors -----
-
- /** State maintains ownership, do no use past State lifetime */
- Scope* scope() { return _scope.get(); }
-
- const Config& config() { return _config; }
-
- bool isOnDisk() { return _onDisk; }
+typedef std::vector<BSONObj> BSONList;
- long long numEmits() const { if (_jsMode) return _scope->getNumberLongLong("_emitCt"); return _numEmits; }
- long long numReduces() const { if (_jsMode) return _scope->getNumberLongLong("_redCt"); return _config.reducer->numReduces; }
- long long numInMemKeys() const { if (_jsMode) return _scope->getNumberLongLong("_keyCt"); return _temp->size(); }
+class State;
- bool jsMode() {return _jsMode;}
- void switchMode(bool jsMode);
- void bailFromJS();
-
- Collection* getCollectionOrUassert(Database* db, StringData ns);
-
- const Config& _config;
- DBDirectClient _db;
- bool _useIncremental; // use an incremental collection
+// ------------ function interfaces -----------
- protected:
+class Mapper {
+ MONGO_DISALLOW_COPYING(Mapper);
- /**
- * Appends a new document to the in-memory list of tuples, which are under that
- * document's key.
- *
- * @return estimated in-memory size occupied by the newly added document.
- */
- int _add(InMemory* im , const BSONObj& a);
+public:
+ virtual ~Mapper() {}
+ virtual void init(State* state) = 0;
- OperationContext* _txn;
- std::unique_ptr<Scope> _scope;
- bool _onDisk; // if the end result of this map reduce is disk or not
+ virtual void map(const BSONObj& o) = 0;
- std::unique_ptr<InMemory> _temp;
- long _size; // bytes in _temp
- long _dupCount; // number of duplicate key entries
+protected:
+ Mapper() = default;
+};
- long long _numEmits;
-
- bool _jsMode;
- ScriptingFunction _reduceAll;
- ScriptingFunction _reduceAndEmit;
- ScriptingFunction _reduceAndFinalize;
- ScriptingFunction _reduceAndFinalizeAndInsert;
- };
+class Finalizer {
+ MONGO_DISALLOW_COPYING(Finalizer);
- BSONObj fast_emit( const BSONObj& args, void* data );
- BSONObj _bailFromJS( const BSONObj& args, void* data );
+public:
+ virtual ~Finalizer() {}
+ virtual void init(State* state) = 0;
- void addPrivilegesRequiredForMapReduce(Command* commandTemplate,
- const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out);
- } // end mr namespace
-}
+ /**
+ * this takes a tuple and returns a tuple
+ */
+ virtual BSONObj finalize(const BSONObj& tuple) = 0;
+
+protected:
+ Finalizer() = default;
+};
+
+class Reducer {
+ MONGO_DISALLOW_COPYING(Reducer);
+
+public:
+ Reducer() : numReduces(0) {}
+ virtual ~Reducer() {}
+ virtual void init(State* state) = 0;
+ virtual BSONObj reduce(const BSONList& tuples) = 0;
+    /** this means it's a final reduce, even if there is no finalizer */
+ virtual BSONObj finalReduce(const BSONList& tuples, Finalizer* finalizer) = 0;
+ long long numReduces;
+};
+
+// ------------ js function implementations -----------
+
+/**
+ * used as a holder for Scope and ScriptingFunction
+ * visitor-like pattern, as the Scope is obtained on first access
+ */
+class JSFunction {
+ MONGO_DISALLOW_COPYING(JSFunction);
+
+public:
+ /**
+ * @param type (map|reduce|finalize)
+ */
+ JSFunction(const std::string& type, const BSONElement& e);
+ virtual ~JSFunction() {}
+
+ virtual void init(State* state);
+
+ Scope* scope() const {
+ return _scope;
+ }
+ ScriptingFunction func() const {
+ return _func;
+ }
+
+private:
+ std::string _type;
+ std::string _code; // actual javascript code
+ BSONObj _wantedScope; // this is for CodeWScope
+
+ Scope* _scope; // this is not owned by us, and might be shared
+ ScriptingFunction _func;
+};
+
+class JSMapper : public Mapper {
+public:
+ JSMapper(const BSONElement& code) : _func("_map", code) {}
+ virtual void map(const BSONObj& o);
+ virtual void init(State* state);
+
+private:
+ JSFunction _func;
+ BSONObj _params;
+};
+
+class JSReducer : public Reducer {
+public:
+ JSReducer(const BSONElement& code) : _func("_reduce", code) {}
+ virtual void init(State* state);
+
+ virtual BSONObj reduce(const BSONList& tuples);
+ virtual BSONObj finalReduce(const BSONList& tuples, Finalizer* finalizer);
+
+private:
+ /**
+ * result in "__returnValue"
+ * @param key OUT
+ * @param endSizeEstimate OUT
+ */
+ void _reduce(const BSONList& values, BSONObj& key, int& endSizeEstimate);
+
+ JSFunction _func;
+};
+
+class JSFinalizer : public Finalizer {
+public:
+ JSFinalizer(const BSONElement& code) : _func("_finalize", code) {}
+ virtual BSONObj finalize(const BSONObj& o);
+ virtual void init(State* state) {
+ _func.init(state);
+ }
+
+private:
+ JSFunction _func;
+};
+
+// -----------------
+
+
+class TupleKeyCmp {
+public:
+ TupleKeyCmp() {}
+ bool operator()(const BSONObj& l, const BSONObj& r) const {
+ return l.firstElement().woCompare(r.firstElement()) < 0;
+ }
+};
+
+typedef std::map<BSONObj, BSONList, TupleKeyCmp> InMemory; // from key to list of tuples
+
+/**
+ * holds map/reduce config information
+ */
+class Config {
+public:
+ Config(const std::string& _dbname, const BSONObj& cmdObj);
+
+ std::string dbname;
+ std::string ns;
+
+ // options
+ bool verbose;
+ bool jsMode;
+ int splitInfo;
+
+ // query options
+
+ BSONObj filter;
+ BSONObj sort;
+ long long limit;
+
+ // functions
+
+ std::unique_ptr<Mapper> mapper;
+ std::unique_ptr<Reducer> reducer;
+ std::unique_ptr<Finalizer> finalizer;
+
+ BSONObj mapParams;
+ BSONObj scopeSetup;
+
+ // output tables
+ std::string incLong;
+ std::string tempNamespace;
+
+ enum OutputType {
+ REPLACE, // atomically replace the collection
+ MERGE, // merge keys, override dups
+ REDUCE, // merge keys, reduce dups
+ INMEMORY // only store in memory, limited in size
+ };
+ struct OutputOptions {
+ std::string outDB;
+ std::string collectionName;
+ std::string finalNamespace;
+ // if true, no lock during output operation
+ bool outNonAtomic;
+ OutputType outType;
+ } outputOptions;
+
+ static OutputOptions parseOutputOptions(const std::string& dbname, const BSONObj& cmdObj);
+
+ // max number of keys allowed in JS map before switching mode
+ long jsMaxKeys;
+ // ratio of duplicates vs unique keys before reduce is triggered in js mode
+ float reduceTriggerRatio;
+ // maximum size of map before it gets dumped to disk
+ long maxInMemSize;
+
+ // true when called from mongos to do phase-1 of M/R
+ bool shardedFirstPass;
+
+ static AtomicUInt32 JOB_NUMBER;
+}; // end MRsetup
+
+/**
+ * stores information about intermediate map reduce state
+ * controls flow of data from map->reduce->finalize->output
+ */
+class State {
+public:
+ /**
+ * txn must outlive this State.
+ */
+ State(OperationContext* txn, const Config& c);
+ ~State();
+
+ void init();
+
+ // ---- prep -----
+ bool sourceExists();
+
+ long long incomingDocuments();
+
+ // ---- map stage ----
+
+ /**
+     * stages a document in in-memory storage
+ */
+ void emit(const BSONObj& a);
+
+ /**
+ * Checks the size of the transient in-memory results accumulated so far and potentially
+ * runs reduce in order to compact them. If the data is still too large, it will be
+ * spilled to the output collection.
+ *
+     * NOTE: Make sure that no DB locks are held when calling this function, because it may
+ * try to acquire write DB lock for the write to the output collection.
+ */
+ void reduceAndSpillInMemoryStateIfNeeded();
+
+ /**
+ * run reduce on _temp
+ */
+ void reduceInMemory();
+
+ /**
+ * transfers in memory storage to temp collection
+ */
+ void dumpToInc();
+ void insertToInc(BSONObj& o);
+ void _insertToInc(BSONObj& o);
+
+ // ------ reduce stage -----------
+
+ void prepTempCollection();
+
+ void finalReduce(BSONList& values);
+
+ void finalReduce(CurOp* op, ProgressMeterHolder& pm);
+
+ // ------- cleanup/data positioning ----------
+
+ /**
+ * Clean up the temporary and incremental collections
+ */
+ void dropTempCollections();
+
+ /**
+     @return number of objects in collection
+ */
+ long long postProcessCollection(OperationContext* txn, CurOp* op, ProgressMeterHolder& pm);
+ long long postProcessCollectionNonAtomic(OperationContext* txn,
+ CurOp* op,
+ ProgressMeterHolder& pm);
+
+ /**
+     * if INMEMORY, will append the results
+ * may also append stats or anything else it likes
+ */
+ void appendResults(BSONObjBuilder& b);
+
+ // -------- util ------------
+
+ /**
+ * inserts with correct replication semantics
+ */
+ void insert(const std::string& ns, const BSONObj& o);
+
+ // ------ simple accessors -----
+
+    /** State maintains ownership, do not use past State lifetime */
+ Scope* scope() {
+ return _scope.get();
+ }
+
+ const Config& config() {
+ return _config;
+ }
+
+ bool isOnDisk() {
+ return _onDisk;
+ }
+
+ long long numEmits() const {
+ if (_jsMode)
+ return _scope->getNumberLongLong("_emitCt");
+ return _numEmits;
+ }
+ long long numReduces() const {
+ if (_jsMode)
+ return _scope->getNumberLongLong("_redCt");
+ return _config.reducer->numReduces;
+ }
+ long long numInMemKeys() const {
+ if (_jsMode)
+ return _scope->getNumberLongLong("_keyCt");
+ return _temp->size();
+ }
+
+ bool jsMode() {
+ return _jsMode;
+ }
+ void switchMode(bool jsMode);
+ void bailFromJS();
+
+ Collection* getCollectionOrUassert(Database* db, StringData ns);
+
+ const Config& _config;
+ DBDirectClient _db;
+ bool _useIncremental; // use an incremental collection
+
+protected:
+ /**
+ * Appends a new document to the in-memory list of tuples, which are under that
+ * document's key.
+ *
+ * @return estimated in-memory size occupied by the newly added document.
+ */
+ int _add(InMemory* im, const BSONObj& a);
+
+ OperationContext* _txn;
+ std::unique_ptr<Scope> _scope;
+    bool _onDisk; // whether the end result of this map reduce is stored on disk
+
+ std::unique_ptr<InMemory> _temp;
+ long _size; // bytes in _temp
+ long _dupCount; // number of duplicate key entries
+
+ long long _numEmits;
+
+ bool _jsMode;
+ ScriptingFunction _reduceAll;
+ ScriptingFunction _reduceAndEmit;
+ ScriptingFunction _reduceAndFinalize;
+ ScriptingFunction _reduceAndFinalizeAndInsert;
+};
+
+BSONObj fast_emit(const BSONObj& args, void* data);
+BSONObj _bailFromJS(const BSONObj& args, void* data);
+
+void addPrivilegesRequiredForMapReduce(Command* commandTemplate,
+ const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out);
+} // end mr namespace
+}
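
The InMemory map declared above orders keys with TupleKeyCmp, which compares only the first element of each key object, so two key objects that agree on their first element share one bucket. A self-contained sketch of that grouping behavior, with an aggregate struct standing in for BSONObj:

    #include <iostream>
    #include <map>
    #include <string>
    #include <vector>

    // Stand-in for BSONObj: only 'first' participates in ordering, mirroring
    // TupleKeyCmp's use of firstElement().woCompare().
    struct Obj {
        std::string first;
        std::string rest;
    };

    struct FirstFieldCmp {
        bool operator()(const Obj& l, const Obj& r) const {
            return l.first < r.first;
        }
    };

    int main() {
        std::map<Obj, std::vector<Obj>, FirstFieldCmp> inMemory;  // key -> list of tuples
        inMemory[{"a", "x"}].push_back({"a", "x"});
        inMemory[{"a", "y"}].push_back({"a", "y"});  // same first field: same bucket
        std::cout << inMemory.size() << "\n";                  // prints 1
        std::cout << inMemory.begin()->second.size() << "\n";  // prints 2
        return 0;
    }
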
diff --git a/src/mongo/db/commands/mr_common.cpp b/src/mongo/db/commands/mr_common.cpp
index 64e65f3e501..4d11661fd2d 100644
--- a/src/mongo/db/commands/mr_common.cpp
+++ b/src/mongo/db/commands/mr_common.cpp
@@ -41,107 +41,96 @@
namespace mongo {
- namespace mr {
- Config::OutputOptions Config::parseOutputOptions(const std::string& dbname,
- const BSONObj& cmdObj) {
- Config::OutputOptions outputOptions;
-
- outputOptions.outNonAtomic = false;
- if (cmdObj["out"].type() == String) {
- outputOptions.collectionName = cmdObj["out"].String();
- outputOptions.outType = REPLACE;
- }
- else if (cmdObj["out"].type() == Object) {
- BSONObj o = cmdObj["out"].embeddedObject();
-
- if (o.hasElement("normal")) {
- outputOptions.outType = REPLACE;
- outputOptions.collectionName = o["normal"].String();
- }
- else if (o.hasElement("replace")) {
- outputOptions.outType = REPLACE;
- outputOptions.collectionName = o["replace"].String();
- }
- else if (o.hasElement("merge")) {
- outputOptions.outType = MERGE;
- outputOptions.collectionName = o["merge"].String();
- }
- else if (o.hasElement("reduce")) {
- outputOptions.outType = REDUCE;
- outputOptions.collectionName = o["reduce"].String();
- }
- else if (o.hasElement("inline")) {
- outputOptions.outType = INMEMORY;
- }
- else {
- uasserted(13522,
- str::stream() << "please specify one of "
- << "[replace|merge|reduce|inline] in 'out' object");
- }
-
- if (o.hasElement("db")) {
- outputOptions.outDB = o["db"].String();
- }
-
- if (o.hasElement("nonAtomic")) {
- outputOptions.outNonAtomic = o["nonAtomic"].Bool();
- if (outputOptions.outNonAtomic)
- uassert(15895,
- "nonAtomic option cannot be used with this output type",
- (outputOptions.outType == REDUCE ||
- outputOptions.outType == MERGE));
- }
- }
- else {
- uasserted(13606 , "'out' has to be a string or an object");
- }
-
- if (outputOptions.outType != INMEMORY) {
- outputOptions.finalNamespace = mongoutils::str::stream()
- << (outputOptions.outDB.empty() ? dbname : outputOptions.outDB)
- << "." << outputOptions.collectionName;
- }
-
- return outputOptions;
+namespace mr {
+Config::OutputOptions Config::parseOutputOptions(const std::string& dbname, const BSONObj& cmdObj) {
+ Config::OutputOptions outputOptions;
+
+ outputOptions.outNonAtomic = false;
+ if (cmdObj["out"].type() == String) {
+ outputOptions.collectionName = cmdObj["out"].String();
+ outputOptions.outType = REPLACE;
+ } else if (cmdObj["out"].type() == Object) {
+ BSONObj o = cmdObj["out"].embeddedObject();
+
+ if (o.hasElement("normal")) {
+ outputOptions.outType = REPLACE;
+ outputOptions.collectionName = o["normal"].String();
+ } else if (o.hasElement("replace")) {
+ outputOptions.outType = REPLACE;
+ outputOptions.collectionName = o["replace"].String();
+ } else if (o.hasElement("merge")) {
+ outputOptions.outType = MERGE;
+ outputOptions.collectionName = o["merge"].String();
+ } else if (o.hasElement("reduce")) {
+ outputOptions.outType = REDUCE;
+ outputOptions.collectionName = o["reduce"].String();
+ } else if (o.hasElement("inline")) {
+ outputOptions.outType = INMEMORY;
+ } else {
+ uasserted(13522,
+ str::stream() << "please specify one of "
+ << "[replace|merge|reduce|inline] in 'out' object");
}
- void addPrivilegesRequiredForMapReduce(Command* commandTemplate,
- const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- Config::OutputOptions outputOptions = Config::parseOutputOptions(dbname, cmdObj);
-
- ResourcePattern inputResource(commandTemplate->parseResourcePattern(dbname, cmdObj));
- uassert(17142, mongoutils::str::stream() <<
- "Invalid input resource " << inputResource.toString(),
- inputResource.isExactNamespacePattern());
- out->push_back(Privilege(inputResource, ActionType::find));
-
- if (outputOptions.outType != Config::INMEMORY) {
- ActionSet outputActions;
- outputActions.addAction(ActionType::insert);
- if (outputOptions.outType == Config::REPLACE) {
- outputActions.addAction(ActionType::remove);
- }
- else {
- outputActions.addAction(ActionType::update);
- }
-
- if (shouldBypassDocumentValidationForCommand(cmdObj)) {
- outputActions.addAction(ActionType::bypassDocumentValidation);
- }
-
- ResourcePattern outputResource(
- ResourcePattern::forExactNamespace(
- NamespaceString(outputOptions.finalNamespace)));
- uassert(17143, mongoutils::str::stream() << "Invalid target namespace " <<
- outputResource.ns().ns(),
- outputResource.ns().isValid());
-
- // TODO: check if outputNs exists and add createCollection privilege if not
- out->push_back(Privilege(outputResource, outputActions));
- }
+ if (o.hasElement("db")) {
+ outputOptions.outDB = o["db"].String();
}
+
+ if (o.hasElement("nonAtomic")) {
+ outputOptions.outNonAtomic = o["nonAtomic"].Bool();
+ if (outputOptions.outNonAtomic)
+ uassert(15895,
+ "nonAtomic option cannot be used with this output type",
+ (outputOptions.outType == REDUCE || outputOptions.outType == MERGE));
+ }
+ } else {
+ uasserted(13606, "'out' has to be a string or an object");
+ }
+
+ if (outputOptions.outType != INMEMORY) {
+ outputOptions.finalNamespace = mongoutils::str::stream()
+ << (outputOptions.outDB.empty() ? dbname : outputOptions.outDB) << "."
+ << outputOptions.collectionName;
}
+ return outputOptions;
+}
+
+void addPrivilegesRequiredForMapReduce(Command* commandTemplate,
+ const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ Config::OutputOptions outputOptions = Config::parseOutputOptions(dbname, cmdObj);
+
+ ResourcePattern inputResource(commandTemplate->parseResourcePattern(dbname, cmdObj));
+ uassert(17142,
+ mongoutils::str::stream() << "Invalid input resource " << inputResource.toString(),
+ inputResource.isExactNamespacePattern());
+ out->push_back(Privilege(inputResource, ActionType::find));
+
+ if (outputOptions.outType != Config::INMEMORY) {
+ ActionSet outputActions;
+ outputActions.addAction(ActionType::insert);
+ if (outputOptions.outType == Config::REPLACE) {
+ outputActions.addAction(ActionType::remove);
+ } else {
+ outputActions.addAction(ActionType::update);
+ }
+
+ if (shouldBypassDocumentValidationForCommand(cmdObj)) {
+ outputActions.addAction(ActionType::bypassDocumentValidation);
+ }
+
+ ResourcePattern outputResource(
+ ResourcePattern::forExactNamespace(NamespaceString(outputOptions.finalNamespace)));
+ uassert(17143,
+ mongoutils::str::stream() << "Invalid target namespace "
+ << outputResource.ns().ns(),
+ outputResource.ns().isValid());
+
+ // TODO: check if outputNs exists and add createCollection privilege if not
+ out->push_back(Privilege(outputResource, outputActions));
+ }
+}
+}
}
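
For quick reference, the 'out' forms accepted by parseOutputOptions above and what they parse to; this is only a summary of the code above, and the tests in mr_test.cpp below exercise the same cases:

    // {out: 'c'}                            -> REPLACE,  finalNamespace "<dbname>.c"
    // {out: {normal: 'c'}}                  -> REPLACE,  finalNamespace "<dbname>.c"
    // {out: {replace: 'c', db: 'd2'}}       -> REPLACE,  finalNamespace "d2.c"
    // {out: {merge: 'c'}}                   -> MERGE,    finalNamespace "<dbname>.c"
    // {out: {reduce: 'c'}}                  -> REDUCE,   finalNamespace "<dbname>.c"
    // {out: {inline: 1}}                    -> INMEMORY, finalNamespace left empty
    // {out: {merge: 'c', nonAtomic: true}}  -> MERGE with outNonAtomic == true
    // nonAtomic with normal/replace/inline  -> uassert 15895 (UserException)
    // 'out' missing, or not string/object   -> uasserted 13606 (UserException)
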
diff --git a/src/mongo/db/commands/mr_test.cpp b/src/mongo/db/commands/mr_test.cpp
index 72c28154c9d..93e90851257 100644
--- a/src/mongo/db/commands/mr_test.cpp
+++ b/src/mongo/db/commands/mr_test.cpp
@@ -41,121 +41,171 @@ using namespace mongo;
namespace {
- /**
- * Tests for mr::Config
- */
+/**
+ * Tests for mr::Config
+ */
- /**
- * Helper function to verify field of mr::Config::OutputOptions.
- */
- template <typename T> void _compareOutputOptionField(const std::string& dbname,
- const std::string& cmdObjStr,
- const std::string& fieldName,
- const T& actual, const T& expected) {
- if (actual == expected) return;
- FAIL(str::stream() << "parseOutputOptions(\"" << dbname << ", " << cmdObjStr << "): "
- << fieldName << ": Expected: " << expected << ". Actual: " << actual);
- }
+/**
+ * Helper function to verify field of mr::Config::OutputOptions.
+ */
+template <typename T>
+void _compareOutputOptionField(const std::string& dbname,
+ const std::string& cmdObjStr,
+ const std::string& fieldName,
+ const T& actual,
+ const T& expected) {
+ if (actual == expected)
+ return;
+ FAIL(str::stream() << "parseOutputOptions(\"" << dbname << ", " << cmdObjStr << "): "
+ << fieldName << ": Expected: " << expected << ". Actual: " << actual);
+}
- /**
- * Returns string representation of mr::Config::OutputType
- */
- std::string _getOutTypeString(mr::Config::OutputType outType) {
- switch (outType) {
- case mr::Config::REPLACE: return "REPLACE";
- case mr::Config::MERGE: return "MERGE";
- case mr::Config::REDUCE: return "REDUCE";
- case mr::Config::INMEMORY: return "INMEMORY";
- }
- invariant(0);
+/**
+ * Returns string representation of mr::Config::OutputType
+ */
+std::string _getOutTypeString(mr::Config::OutputType outType) {
+ switch (outType) {
+ case mr::Config::REPLACE:
+ return "REPLACE";
+ case mr::Config::MERGE:
+ return "MERGE";
+ case mr::Config::REDUCE:
+ return "REDUCE";
+ case mr::Config::INMEMORY:
+ return "INMEMORY";
}
+ invariant(0);
+}
- /**
- * Test helper function to check expected result of parseOutputOptions.
- */
- void _testConfigParseOutputOptions(const std::string& dbname, const std::string& cmdObjStr,
- const std::string& expectedOutDb,
- const std::string& expectedCollectionName,
- const std::string& expectedFinalNamespace,
- bool expectedOutNonAtomic,
- mr::Config::OutputType expectedOutType) {
- const BSONObj cmdObj = fromjson(cmdObjStr);
- mr::Config::OutputOptions outputOptions = mr::Config::parseOutputOptions(dbname, cmdObj);
- _compareOutputOptionField(dbname, cmdObjStr, "outDb", outputOptions.outDB, expectedOutDb);
- _compareOutputOptionField(dbname, cmdObjStr, "collectionName",
- outputOptions.collectionName, expectedCollectionName);
- _compareOutputOptionField(dbname, cmdObjStr, "finalNamespace",
- outputOptions.finalNamespace, expectedFinalNamespace);
- _compareOutputOptionField(dbname, cmdObjStr, "outNonAtomic", outputOptions.outNonAtomic,
- expectedOutNonAtomic);
- _compareOutputOptionField(dbname, cmdObjStr, "outType",
- _getOutTypeString(outputOptions.outType),
- _getOutTypeString(expectedOutType));
- }
+/**
+ * Test helper function to check expected result of parseOutputOptions.
+ */
+void _testConfigParseOutputOptions(const std::string& dbname,
+ const std::string& cmdObjStr,
+ const std::string& expectedOutDb,
+ const std::string& expectedCollectionName,
+ const std::string& expectedFinalNamespace,
+ bool expectedOutNonAtomic,
+ mr::Config::OutputType expectedOutType) {
+ const BSONObj cmdObj = fromjson(cmdObjStr);
+ mr::Config::OutputOptions outputOptions = mr::Config::parseOutputOptions(dbname, cmdObj);
+ _compareOutputOptionField(dbname, cmdObjStr, "outDb", outputOptions.outDB, expectedOutDb);
+ _compareOutputOptionField(
+ dbname, cmdObjStr, "collectionName", outputOptions.collectionName, expectedCollectionName);
+ _compareOutputOptionField(
+ dbname, cmdObjStr, "finalNamespace", outputOptions.finalNamespace, expectedFinalNamespace);
+ _compareOutputOptionField(
+ dbname, cmdObjStr, "outNonAtomic", outputOptions.outNonAtomic, expectedOutNonAtomic);
+ _compareOutputOptionField(dbname,
+ cmdObjStr,
+ "outType",
+ _getOutTypeString(outputOptions.outType),
+ _getOutTypeString(expectedOutType));
+}
- /**
- * Tests for mr::Config::parseOutputOptions.
- */
- TEST(ConfigOutputOptionsTest, parseOutputOptions) {
- // Missing 'out' field.
- ASSERT_THROWS(mr::Config::parseOutputOptions("mydb", fromjson("{}")), UserException);
- // 'out' must be either string or object.
- ASSERT_THROWS(mr::Config::parseOutputOptions("mydb", fromjson("{out: 99}")),
- UserException);
- // 'out.nonAtomic' is not supported with normal, replace or inline.
- ASSERT_THROWS(mr::Config::parseOutputOptions(
- "mydb",
- fromjson("{out: {normal: 'mycoll', nonAtomic: true}}")),
- UserException);
- ASSERT_THROWS(mr::Config::parseOutputOptions(
- "mydb",
- fromjson("{out: {replace: 'mycoll', nonAtomic: true}}")),
- UserException);
- ASSERT_THROWS(mr::Config::parseOutputOptions(
- "mydb",
- fromjson("{out: {inline: 'mycoll', nonAtomic: true}}")),
- UserException);
- // Unknown output specifer.
- ASSERT_THROWS(mr::Config::parseOutputOptions(
- "mydb",
- fromjson("{out: {no_such_out_type: 'mycoll'}}")),
- UserException);
+/**
+ * Tests for mr::Config::parseOutputOptions.
+ */
+TEST(ConfigOutputOptionsTest, parseOutputOptions) {
+ // Missing 'out' field.
+ ASSERT_THROWS(mr::Config::parseOutputOptions("mydb", fromjson("{}")), UserException);
+ // 'out' must be either string or object.
+ ASSERT_THROWS(mr::Config::parseOutputOptions("mydb", fromjson("{out: 99}")), UserException);
+ // 'out.nonAtomic' is not supported with normal, replace or inline.
+ ASSERT_THROWS(mr::Config::parseOutputOptions(
+ "mydb", fromjson("{out: {normal: 'mycoll', nonAtomic: true}}")),
+ UserException);
+ ASSERT_THROWS(mr::Config::parseOutputOptions(
+ "mydb", fromjson("{out: {replace: 'mycoll', nonAtomic: true}}")),
+ UserException);
+ ASSERT_THROWS(mr::Config::parseOutputOptions(
+ "mydb", fromjson("{out: {inline: 'mycoll', nonAtomic: true}}")),
+ UserException);
+    // Unknown output specifier.
+ ASSERT_THROWS(
+ mr::Config::parseOutputOptions("mydb", fromjson("{out: {no_such_out_type: 'mycoll'}}")),
+ UserException);
- // 'out' is string.
- _testConfigParseOutputOptions("mydb", "{out: 'mycoll'}",
- "", "mycoll", "mydb.mycoll", false, mr::Config::REPLACE);
- // 'out' is object.
- _testConfigParseOutputOptions("mydb", "{out: {normal: 'mycoll'}}",
- "", "mycoll", "mydb.mycoll", false, mr::Config::REPLACE);
- // 'out.db' overrides dbname parameter
- _testConfigParseOutputOptions("mydb1", "{out: {replace: 'mycoll', db: 'mydb2'}}",
- "mydb2", "mycoll", "mydb2.mycoll", false,
- mr::Config::REPLACE);
- // 'out.nonAtomic' is supported with merge and reduce.
- _testConfigParseOutputOptions("mydb", "{out: {merge: 'mycoll', nonAtomic: true}}",
- "", "mycoll", "mydb.mycoll", true, mr::Config::MERGE);
- _testConfigParseOutputOptions("mydb", "{out: {reduce: 'mycoll', nonAtomic: true}}",
- "", "mycoll", "mydb.mycoll", true, mr::Config::REDUCE);
- // inline
- _testConfigParseOutputOptions("mydb1", "{out: {inline: 'mycoll', db: 'mydb2'}}",
- "mydb2", "", "", false, mr::Config::INMEMORY);
+ // 'out' is string.
+ _testConfigParseOutputOptions(
+ "mydb", "{out: 'mycoll'}", "", "mycoll", "mydb.mycoll", false, mr::Config::REPLACE);
+ // 'out' is object.
+ _testConfigParseOutputOptions("mydb",
+ "{out: {normal: 'mycoll'}}",
+ "",
+ "mycoll",
+ "mydb.mycoll",
+ false,
+ mr::Config::REPLACE);
+ // 'out.db' overrides dbname parameter
+ _testConfigParseOutputOptions("mydb1",
+ "{out: {replace: 'mycoll', db: 'mydb2'}}",
+ "mydb2",
+ "mycoll",
+ "mydb2.mycoll",
+ false,
+ mr::Config::REPLACE);
+ // 'out.nonAtomic' is supported with merge and reduce.
+ _testConfigParseOutputOptions("mydb",
+ "{out: {merge: 'mycoll', nonAtomic: true}}",
+ "",
+ "mycoll",
+ "mydb.mycoll",
+ true,
+ mr::Config::MERGE);
+ _testConfigParseOutputOptions("mydb",
+ "{out: {reduce: 'mycoll', nonAtomic: true}}",
+ "",
+ "mycoll",
+ "mydb.mycoll",
+ true,
+ mr::Config::REDUCE);
+ // inline
+ _testConfigParseOutputOptions("mydb1",
+ "{out: {inline: 'mycoll', db: 'mydb2'}}",
+ "mydb2",
+ "",
+ "",
+ false,
+ mr::Config::INMEMORY);
- // Order should not matter in fields of 'out' object.
- _testConfigParseOutputOptions("mydb1", "{out: {db: 'mydb2', normal: 'mycoll'}}",
- "mydb2", "mycoll", "mydb2.mycoll", false,
- mr::Config::REPLACE);
- _testConfigParseOutputOptions("mydb1", "{out: {db: 'mydb2', replace: 'mycoll'}}",
- "mydb2", "mycoll", "mydb2.mycoll", false,
- mr::Config::REPLACE);
- _testConfigParseOutputOptions("mydb1", "{out: {nonAtomic: true, merge: 'mycoll'}}",
- "", "mycoll", "mydb1.mycoll", true,
- mr::Config::MERGE);
- _testConfigParseOutputOptions("mydb1", "{out: {nonAtomic: true, reduce: 'mycoll'}}",
- "", "mycoll", "mydb1.mycoll", true,
- mr::Config::REDUCE);
- _testConfigParseOutputOptions("mydb1", "{out: {db: 'mydb2', inline: 'mycoll'}}",
- "mydb2", "", "", false, mr::Config::INMEMORY);
- }
+ // Order should not matter in fields of 'out' object.
+ _testConfigParseOutputOptions("mydb1",
+ "{out: {db: 'mydb2', normal: 'mycoll'}}",
+ "mydb2",
+ "mycoll",
+ "mydb2.mycoll",
+ false,
+ mr::Config::REPLACE);
+ _testConfigParseOutputOptions("mydb1",
+ "{out: {db: 'mydb2', replace: 'mycoll'}}",
+ "mydb2",
+ "mycoll",
+ "mydb2.mycoll",
+ false,
+ mr::Config::REPLACE);
+ _testConfigParseOutputOptions("mydb1",
+ "{out: {nonAtomic: true, merge: 'mycoll'}}",
+ "",
+ "mycoll",
+ "mydb1.mycoll",
+ true,
+ mr::Config::MERGE);
+ _testConfigParseOutputOptions("mydb1",
+ "{out: {nonAtomic: true, reduce: 'mycoll'}}",
+ "",
+ "mycoll",
+ "mydb1.mycoll",
+ true,
+ mr::Config::REDUCE);
+ _testConfigParseOutputOptions("mydb1",
+ "{out: {db: 'mydb2', inline: 'mycoll'}}",
+ "mydb2",
+ "",
+ "",
+ false,
+ mr::Config::INMEMORY);
+}
} // namespace
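
The templated helper above keeps failure messages uniform across string, bool, and enum-as-string fields. A standalone analogue of the same pattern, with hypothetical names and iostream in place of the unit-test framework's FAIL:

    #include <iostream>
    #include <sstream>
    #include <string>

    // Hypothetical analogue of _compareOutputOptionField: one template handles
    // every field type that supports operator== and operator<<.
    template <typename T>
    bool compareField(const std::string& fieldName, const T& actual, const T& expected) {
        if (actual == expected)
            return true;
        std::ostringstream msg;
        msg << fieldName << ": Expected: " << expected << ". Actual: " << actual;
        std::cerr << msg.str() << "\n";
        return false;
    }

    int main() {
        compareField<std::string>("outDb", "mydb2", "mydb2");  // passes silently
        compareField<bool>("outNonAtomic", false, true);       // prints a mismatch (0 vs 1)
        return 0;
    }
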
diff --git a/src/mongo/db/commands/oplog_note.cpp b/src/mongo/db/commands/oplog_note.cpp
index c512c2a8c8b..a9f1ad7e619 100644
--- a/src/mongo/db/commands/oplog_note.cpp
+++ b/src/mongo/db/commands/oplog_note.cpp
@@ -42,52 +42,60 @@
namespace mongo {
- using std::string;
- using std::stringstream;
+using std::string;
+using std::stringstream;
- class AppendOplogNoteCmd : public Command {
- public:
- AppendOplogNoteCmd() : Command( "appendOplogNote" ) {}
- virtual bool slaveOk() const { return false; }
- virtual bool adminOnly() const { return true; }
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual void help( stringstream &help ) const {
- help << "Adds a no-op entry to the oplog";
+class AppendOplogNoteCmd : public Command {
+public:
+ AppendOplogNoteCmd() : Command("appendOplogNote") {}
+ virtual bool slaveOk() const {
+ return false;
+ }
+ virtual bool adminOnly() const {
+ return true;
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual void help(stringstream& help) const {
+ help << "Adds a no-op entry to the oplog";
+ }
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
+ ResourcePattern::forClusterResource(), ActionType::appendOplogNote)) {
+ return Status(ErrorCodes::Unauthorized, "Unauthorized");
}
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forClusterResource(), ActionType::appendOplogNote)) {
- return Status(ErrorCodes::Unauthorized, "Unauthorized");
- }
- return Status::OK();
+ return Status::OK();
+ }
+ virtual bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ if (!repl::getGlobalReplicationCoordinator()->isReplEnabled()) {
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::NoReplicationEnabled,
+ "Must have replication set up to run \"appendOplogNote\""));
+ }
+ BSONElement dataElement;
+ Status status = bsonExtractTypedField(cmdObj, "data", Object, &dataElement);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
}
- virtual bool run(OperationContext* txn, const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- if (!repl::getGlobalReplicationCoordinator()->isReplEnabled()) {
- return appendCommandStatus(result, Status(
- ErrorCodes::NoReplicationEnabled,
- "Must have replication set up to run \"appendOplogNote\""));
- }
- BSONElement dataElement;
- Status status = bsonExtractTypedField(cmdObj, "data", Object, &dataElement);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
- ScopedTransaction scopedXact(txn, MODE_X);
- Lock::GlobalWrite globalWrite(txn->lockState());
+ ScopedTransaction scopedXact(txn, MODE_X);
+ Lock::GlobalWrite globalWrite(txn->lockState());
- WriteUnitOfWork wuow(txn);
- getGlobalServiceContext()->getOpObserver()->onOpMessage(txn, dataElement.Obj());
- wuow.commit();
- return true;
- }
+ WriteUnitOfWork wuow(txn);
+ getGlobalServiceContext()->getOpObserver()->onOpMessage(txn, dataElement.Obj());
+ wuow.commit();
+ return true;
+ }
- } appendOplogNoteCmd;
+} appendOplogNoteCmd;
-} // namespace mongo
+} // namespace mongo
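
A hypothetical client-side sketch of the command document this handler expects: a mandatory "data" object, sent against the admin database since adminOnly() returns true. The BSON macro and header paths are assumptions about the surrounding tree, not part of this diff:

    // Hypothetical sketch; the BSON macro and header paths are assumed in-tree.
    #include "mongo/bson/bsonmisc.h"
    #include "mongo/bson/bsonobjbuilder.h"

    mongo::BSONObj makeAppendOplogNoteCmd() {
        // Builds { appendOplogNote: 1, data: { msg: "pre-backup marker" } };
        // run() extracts the mandatory "data" Object field via bsonExtractTypedField.
        return BSON("appendOplogNote" << 1 << "data" << BSON("msg"
                                                             << "pre-backup marker"));
    }
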
diff --git a/src/mongo/db/commands/parallel_collection_scan.cpp b/src/mongo/db/commands/parallel_collection_scan.cpp
index 36acc5d9bb8..33e24b6648e 100644
--- a/src/mongo/db/commands/parallel_collection_scan.cpp
+++ b/src/mongo/db/commands/parallel_collection_scan.cpp
@@ -41,128 +41,121 @@
namespace mongo {
- using std::unique_ptr;
- using std::string;
-
- class ParallelCollectionScanCmd : public Command {
- public:
-
- struct ExtentInfo {
- ExtentInfo( RecordId dl, size_t s )
- : diskLoc(dl), size(s) {
- }
- RecordId diskLoc;
- size_t size;
- };
-
- // ------------------------------------------------
-
- ParallelCollectionScanCmd() : Command( "parallelCollectionScan" ){}
-
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual bool slaveOk() const { return true; }
-
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- ActionSet actions;
- actions.addAction(ActionType::find);
- Privilege p(parseResourcePattern(dbname, cmdObj), actions);
- if ( AuthorizationSession::get(client)->isAuthorizedForPrivilege(p) )
- return Status::OK();
- return Status(ErrorCodes::Unauthorized, "Unauthorized");
+using std::unique_ptr;
+using std::string;
+
+class ParallelCollectionScanCmd : public Command {
+public:
+ struct ExtentInfo {
+ ExtentInfo(RecordId dl, size_t s) : diskLoc(dl), size(s) {}
+ RecordId diskLoc;
+ size_t size;
+ };
+
+ // ------------------------------------------------
+
+ ParallelCollectionScanCmd() : Command("parallelCollectionScan") {}
+
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual bool slaveOk() const {
+ return true;
+ }
+
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ ActionSet actions;
+ actions.addAction(ActionType::find);
+ Privilege p(parseResourcePattern(dbname, cmdObj), actions);
+ if (AuthorizationSession::get(client)->isAuthorizedForPrivilege(p))
+ return Status::OK();
+ return Status(ErrorCodes::Unauthorized, "Unauthorized");
+ }
+
+ virtual bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ NamespaceString ns(dbname, cmdObj[name].String());
+
+ AutoGetCollectionForRead ctx(txn, ns.ns());
+
+ Collection* collection = ctx.getCollection();
+ if (!collection)
+ return appendCommandStatus(result,
+ Status(ErrorCodes::NamespaceNotFound,
+ str::stream() << "ns does not exist: " << ns.ns()));
+
+ size_t numCursors = static_cast<size_t>(cmdObj["numCursors"].numberInt());
+
+ if (numCursors == 0 || numCursors > 10000)
+ return appendCommandStatus(result,
+ Status(ErrorCodes::BadValue,
+ str::stream()
+ << "numCursors has to be between 1 and 10000"
+ << " was: " << numCursors));
+
+ auto iterators = collection->getManyCursors(txn);
+ if (iterators.size() < numCursors) {
+ numCursors = iterators.size();
}
- virtual bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
-
- NamespaceString ns( dbname, cmdObj[name].String() );
+ OwnedPointerVector<PlanExecutor> execs;
+ for (size_t i = 0; i < numCursors; i++) {
+ WorkingSet* ws = new WorkingSet();
+ MultiIteratorStage* mis = new MultiIteratorStage(txn, ws, collection);
- AutoGetCollectionForRead ctx(txn, ns.ns());
+ PlanExecutor* rawExec;
+ // Takes ownership of 'ws' and 'mis'.
+ Status execStatus =
+ PlanExecutor::make(txn, ws, mis, collection, PlanExecutor::YIELD_AUTO, &rawExec);
+ invariant(execStatus.isOK());
+ unique_ptr<PlanExecutor> curExec(rawExec);
- Collection* collection = ctx.getCollection();
- if ( !collection )
- return appendCommandStatus( result,
- Status( ErrorCodes::NamespaceNotFound,
- str::stream() <<
- "ns does not exist: " << ns.ns() ) );
-
- size_t numCursors = static_cast<size_t>( cmdObj["numCursors"].numberInt() );
-
- if ( numCursors == 0 || numCursors > 10000 )
- return appendCommandStatus( result,
- Status( ErrorCodes::BadValue,
- str::stream() <<
- "numCursors has to be between 1 and 10000" <<
- " was: " << numCursors ) );
-
- auto iterators = collection->getManyCursors(txn);
- if (iterators.size() < numCursors) {
- numCursors = iterators.size();
- }
+ // The PlanExecutor was registered on construction due to the YIELD_AUTO policy.
+ // We have to deregister it, as it will be registered with ClientCursor.
+ curExec->deregisterExec();
- OwnedPointerVector<PlanExecutor> execs;
- for ( size_t i = 0; i < numCursors; i++ ) {
- WorkingSet* ws = new WorkingSet();
- MultiIteratorStage* mis = new MultiIteratorStage(txn, ws, collection);
+ // Need to save state while yielding locks between now and getMore().
+ curExec->saveState();
- PlanExecutor* rawExec;
- // Takes ownership of 'ws' and 'mis'.
- Status execStatus = PlanExecutor::make(txn, ws, mis, collection,
- PlanExecutor::YIELD_AUTO, &rawExec);
- invariant(execStatus.isOK());
- unique_ptr<PlanExecutor> curExec(rawExec);
+ execs.push_back(curExec.release());
+ }
- // The PlanExecutor was registered on construction due to the YIELD_AUTO policy.
- // We have to deregister it, as it will be registered with ClientCursor.
- curExec->deregisterExec();
+ // transfer iterators to executors using a round-robin distribution.
+ // TODO consider using a common work queue once invalidation issues go away.
+ for (size_t i = 0; i < iterators.size(); i++) {
+ PlanExecutor* theExec = execs[i % execs.size()];
+ MultiIteratorStage* mis = static_cast<MultiIteratorStage*>(theExec->getRootStage());
- // Need to save state while yielding locks between now and getMore().
- curExec->saveState();
+ // This wasn't called above as they weren't assigned yet
+ iterators[i]->savePositioned();
- execs.push_back(curExec.release());
- }
+ mis->addIterator(std::move(iterators[i]));
+ }
- // transfer iterators to executors using a round-robin distribution.
- // TODO consider using a common work queue once invalidation issues go away.
- for (size_t i = 0; i < iterators.size(); i++) {
- PlanExecutor* theExec = execs[i % execs.size()];
- MultiIteratorStage* mis = static_cast<MultiIteratorStage*>(theExec->getRootStage());
+ {
+ BSONArrayBuilder bucketsBuilder;
+ for (size_t i = 0; i < execs.size(); i++) {
+ // transfer ownership of an executor to the ClientCursor (which manages its own
+ // lifetime).
+ ClientCursor* cc =
+ new ClientCursor(collection->getCursorManager(), execs.releaseAt(i), ns.ns());
- // This wasn't called above as they weren't assigned yet
- iterators[i]->savePositioned();
+ BSONObjBuilder threadResult;
+ appendCursorResponseObject(cc->cursorid(), ns.ns(), BSONArray(), &threadResult);
+ threadResult.appendBool("ok", 1);
- mis->addIterator(std::move(iterators[i]));
+ bucketsBuilder.append(threadResult.obj());
}
-
- {
- BSONArrayBuilder bucketsBuilder;
- for (size_t i = 0; i < execs.size(); i++) {
- // transfer ownership of an executor to the ClientCursor (which manages its own
- // lifetime).
- ClientCursor* cc = new ClientCursor( collection->getCursorManager(),
- execs.releaseAt(i),
- ns.ns() );
-
- BSONObjBuilder threadResult;
- appendCursorResponseObject( cc->cursorid(),
- ns.ns(),
- BSONArray(),
- &threadResult );
- threadResult.appendBool( "ok", 1 );
-
- bucketsBuilder.append( threadResult.obj() );
- }
- result.appendArray( "cursors", bucketsBuilder.obj() );
- }
-
- return true;
-
+ result.appendArray("cursors", bucketsBuilder.obj());
}
- } parallelCollectionScanCmd;
+ return true;
+ }
+} parallelCollectionScanCmd;
}
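
The iterator hand-off above is a plain round-robin: iterator i goes to executor i % execs.size(), so the collection's record iterators spread evenly over however many executors were built. A self-contained sketch of the distribution, with ints standing in for iterators and executors:

    #include <cstddef>
    #include <iostream>
    #include <vector>

    int main() {
        std::vector<int> iterators = {0, 1, 2, 3, 4, 5, 6};  // from getManyCursors()
        std::vector<std::vector<int>> executors(3);          // numCursors requested
        // Mirrors: execs[i % execs.size()] ... ->addIterator(std::move(iterators[i]))
        for (std::size_t i = 0; i < iterators.size(); i++)
            executors[i % executors.size()].push_back(iterators[i]);
        for (std::size_t e = 0; e < executors.size(); e++) {
            std::cout << "executor " << e << ":";  // 0: 0 3 6, 1: 1 4, 2: 2 5
            for (int it : executors[e])
                std::cout << " " << it;
            std::cout << "\n";
        }
        return 0;
    }
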
diff --git a/src/mongo/db/commands/parameters.cpp b/src/mongo/db/commands/parameters.cpp
index c9048ee54c2..7ff531bb8a1 100644
--- a/src/mongo/db/commands/parameters.cpp
+++ b/src/mongo/db/commands/parameters.cpp
@@ -51,556 +51,549 @@ using std::stringstream;
namespace mongo {
- namespace {
- void appendParameterNames( stringstream& help ) {
- help << "supported:\n";
- const ServerParameter::Map& m = ServerParameterSet::getGlobal()->getMap();
- for ( ServerParameter::Map::const_iterator i = m.begin(); i != m.end(); ++i ) {
- help << " " << i->first << "\n";
- }
- }
+namespace {
+void appendParameterNames(stringstream& help) {
+ help << "supported:\n";
+ const ServerParameter::Map& m = ServerParameterSet::getGlobal()->getMap();
+ for (ServerParameter::Map::const_iterator i = m.begin(); i != m.end(); ++i) {
+ help << " " << i->first << "\n";
}
+}
+}
- class CmdGet : public Command {
- public:
- CmdGet() : Command( "getParameter" ) { }
- virtual bool slaveOk() const { return true; }
- virtual bool adminOnly() const { return true; }
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::getParameter);
- out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
+class CmdGet : public Command {
+public:
+ CmdGet() : Command("getParameter") {}
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual bool adminOnly() const {
+ return true;
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::getParameter);
+ out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
+ }
+ virtual void help(stringstream& help) const {
+ help << "get administrative option(s)\nexample:\n";
+ help << "{ getParameter:1, notablescan:1 }\n";
+ appendParameterNames(help);
+ help << "{ getParameter:'*' } to get everything\n";
+ }
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ bool all = *cmdObj.firstElement().valuestrsafe() == '*';
+
+ int before = result.len();
+
+ const ServerParameter::Map& m = ServerParameterSet::getGlobal()->getMap();
+ for (ServerParameter::Map::const_iterator i = m.begin(); i != m.end(); ++i) {
+ if (all || cmdObj.hasElement(i->first.c_str())) {
+ i->second->append(txn, result, i->second->name());
+ }
}
- virtual void help( stringstream &help ) const {
- help << "get administrative option(s)\nexample:\n";
- help << "{ getParameter:1, notablescan:1 }\n";
- appendParameterNames( help );
- help << "{ getParameter:'*' } to get everything\n";
+
+ if (before == result.len()) {
+ errmsg = "no option found to get";
+ return false;
}
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- bool all = *cmdObj.firstElement().valuestrsafe() == '*';
-
- int before = result.len();
-
- const ServerParameter::Map& m = ServerParameterSet::getGlobal()->getMap();
- for ( ServerParameter::Map::const_iterator i = m.begin(); i != m.end(); ++i ) {
- if ( all || cmdObj.hasElement( i->first.c_str() ) ) {
- i->second->append(txn, result, i->second->name() );
- }
- }
+ return true;
+ }
+} cmdGet;
- if ( before == result.len() ) {
- errmsg = "no option found to get";
+class CmdSet : public Command {
+public:
+ CmdSet() : Command("setParameter") {}
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual bool adminOnly() const {
+ return true;
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::setParameter);
+ out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
+ }
+ virtual void help(stringstream& help) const {
+ help << "set administrative option(s)\n";
+ help << "{ setParameter:1, <param>:<value> }\n";
+ appendParameterNames(help);
+ }
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ int numSet = 0;
+ bool found = false;
+
+ const ServerParameter::Map& parameterMap = ServerParameterSet::getGlobal()->getMap();
+
+ // First check that we aren't setting the same parameter twice and that we actually are
+ // setting parameters that we have registered and can change at runtime
+ BSONObjIterator parameterCheckIterator(cmdObj);
+
+ // We already know that "setParameter" will be the first element in this object, so skip
+ // past that
+ parameterCheckIterator.next();
+
+ // Set of all the parameters the user is attempting to change
+ std::map<std::string, BSONElement> parametersToSet;
+
+ // Iterate all parameters the user passed in to do the initial validation checks,
+ // including verifying that we are not setting the same parameter twice.
+ while (parameterCheckIterator.more()) {
+ BSONElement parameter = parameterCheckIterator.next();
+ std::string parameterName = parameter.fieldName();
+
+ ServerParameter::Map::const_iterator foundParameter = parameterMap.find(parameterName);
+
+ // Check to see if this is actually a valid parameter
+ if (foundParameter == parameterMap.end()) {
+ errmsg = str::stream() << "attempted to set unrecognized parameter ["
+ << parameterName << "], use help:true to see options ";
return false;
}
- return true;
- }
- } cmdGet;
-
- class CmdSet : public Command {
- public:
- CmdSet() : Command( "setParameter" ) { }
- virtual bool slaveOk() const { return true; }
- virtual bool adminOnly() const { return true; }
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::setParameter);
- out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
- }
- virtual void help( stringstream &help ) const {
- help << "set administrative option(s)\n";
- help << "{ setParameter:1, <param>:<value> }\n";
- appendParameterNames( help );
- }
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- int numSet = 0;
- bool found = false;
-
- const ServerParameter::Map& parameterMap = ServerParameterSet::getGlobal()->getMap();
-
- // First check that we aren't setting the same parameter twice and that we actually are
- // setting parameters that we have registered and can change at runtime
- BSONObjIterator parameterCheckIterator(cmdObj);
-
- // We already know that "setParameter" will be the first element in this object, so skip
- // past that
- parameterCheckIterator.next();
-
- // Set of all the parameters the user is attempting to change
- std::map<std::string, BSONElement> parametersToSet;
-
- // Iterate all parameters the user passed in to do the initial validation checks,
- // including verifying that we are not setting the same parameter twice.
- while (parameterCheckIterator.more()) {
- BSONElement parameter = parameterCheckIterator.next();
- std::string parameterName = parameter.fieldName();
-
- ServerParameter::Map::const_iterator foundParameter =
- parameterMap.find(parameterName);
-
- // Check to see if this is actually a valid parameter
- if (foundParameter == parameterMap.end()) {
- errmsg = str::stream() << "attempted to set unrecognized parameter ["
- << parameterName
- << "], use help:true to see options ";
- return false;
- }
-
- // Make sure we are allowed to change this parameter
- if (!foundParameter->second->allowedToChangeAtRuntime()) {
- errmsg = str::stream() << "not allowed to change [" << parameterName
- << "] at runtime";
- return false;
- }
-
- // Make sure we are only setting this parameter once
- if (parametersToSet.count(parameterName)) {
- errmsg = str::stream() << "attempted to set parameter ["
- << parameterName
- << "] twice in the same setParameter command, "
- << "once to value: ["
- << parametersToSet[parameterName].toString(false)
- << "], and once to value: [" << parameter.toString(false)
- << "]";
- return false;
- }
-
- parametersToSet[parameterName] = parameter;
- }
- // Iterate the parameters that we have confirmed we are setting and actually set them.
- // Not that if setting any one parameter fails, the command will fail, but the user
- // won't see what has been set and what hasn't. See SERVER-8552.
- for (std::map<std::string, BSONElement>::iterator it = parametersToSet.begin();
- it != parametersToSet.end(); ++it) {
- BSONElement parameter = it->second;
- std::string parameterName = it->first;
-
- ServerParameter::Map::const_iterator foundParameter =
- parameterMap.find(parameterName);
-
- if (foundParameter == parameterMap.end()) {
- errmsg = str::stream() << "Parameter: " << parameterName << " that was "
- << "avaliable during our first lookup in the registered "
- << "parameters map is no longer available.";
- return false;
- }
-
- if (numSet == 0) {
- foundParameter->second->append(txn, result, "was");
- }
-
- Status status = foundParameter->second->set(parameter);
- if (status.isOK()) {
- numSet++;
- continue;
- }
-
- errmsg = status.reason();
- result.append("code", status.code());
+ // Make sure we are allowed to change this parameter
+ if (!foundParameter->second->allowedToChangeAtRuntime()) {
+ errmsg = str::stream() << "not allowed to change [" << parameterName
+ << "] at runtime";
return false;
}
- if (numSet == 0 && !found) {
- errmsg = "no option found to set, use help:true to see options ";
+ // Make sure we are only setting this parameter once
+ if (parametersToSet.count(parameterName)) {
+ errmsg = str::stream()
+ << "attempted to set parameter [" << parameterName
+ << "] twice in the same setParameter command, "
+ << "once to value: [" << parametersToSet[parameterName].toString(false)
+ << "], and once to value: [" << parameter.toString(false) << "]";
return false;
}
- return true;
+ parametersToSet[parameterName] = parameter;
}
- } cmdSet;
- namespace {
- using logger::globalLogDomain;
- using logger::LogComponent;
- using logger::LogComponentSetting;
- using logger::LogSeverity;
- using logger::parseLogComponentSettings;
-
- class LogLevelSetting : public ServerParameter {
- public:
- LogLevelSetting() : ServerParameter(ServerParameterSet::getGlobal(), "logLevel") {}
-
- virtual void append(OperationContext* txn, BSONObjBuilder& b, const std::string& name) {
- b << name << globalLogDomain()->getMinimumLogSeverity().toInt();
+ // Iterate the parameters that we have confirmed we are setting and actually set them.
+        // Note that if setting any one parameter fails, the command will fail, but the user
+ // won't see what has been set and what hasn't. See SERVER-8552.
+ for (std::map<std::string, BSONElement>::iterator it = parametersToSet.begin();
+ it != parametersToSet.end();
+ ++it) {
+ BSONElement parameter = it->second;
+ std::string parameterName = it->first;
+
+ ServerParameter::Map::const_iterator foundParameter = parameterMap.find(parameterName);
+
+ if (foundParameter == parameterMap.end()) {
+ errmsg = str::stream() << "Parameter: " << parameterName << " that was "
+                                   << "available during our first lookup in the registered "
+ << "parameters map is no longer available.";
+ return false;
}
- virtual Status set(const BSONElement& newValueElement) {
- int newValue;
- if (!newValueElement.coerce(&newValue) || newValue < 0)
- return Status(ErrorCodes::BadValue, mongoutils::str::stream() <<
- "Invalid value for logLevel: " << newValueElement);
- LogSeverity newSeverity = (newValue > 0) ? LogSeverity::Debug(newValue) :
- LogSeverity::Log();
- globalLogDomain()->setMinimumLoggedSeverity(newSeverity);
- return Status::OK();
+ if (numSet == 0) {
+ foundParameter->second->append(txn, result, "was");
}
- virtual Status setFromString(const std::string& str) {
- int newValue;
- Status status = parseNumberFromString(str, &newValue);
- if (!status.isOK())
- return status;
- if (newValue < 0)
- return Status(ErrorCodes::BadValue, mongoutils::str::stream() <<
- "Invalid value for logLevel: " << newValue);
- LogSeverity newSeverity = (newValue > 0) ? LogSeverity::Debug(newValue) :
- LogSeverity::Log();
- globalLogDomain()->setMinimumLoggedSeverity(newSeverity);
- return Status::OK();
- }
- } logLevelSetting;
-
- /**
- * Log component verbosity.
- * Log levels of log component hierarchy.
- * Negative value for a log component means the default log level will be used.
- */
- class LogComponentVerbositySetting : public ServerParameter {
- MONGO_DISALLOW_COPYING(LogComponentVerbositySetting);
- public:
- LogComponentVerbositySetting()
- : ServerParameter(ServerParameterSet::getGlobal(), "logComponentVerbosity") {}
-
- virtual void append(OperationContext* txn, BSONObjBuilder& b,
- const std::string& name) {
- BSONObj currentSettings;
- _get(&currentSettings);
- b << name << currentSettings;
+ Status status = foundParameter->second->set(parameter);
+ if (status.isOK()) {
+ numSet++;
+ continue;
}
- virtual Status set(const BSONElement& newValueElement) {
- if (!newValueElement.isABSONObj()) {
- return Status(ErrorCodes::TypeMismatch, mongoutils::str::stream() <<
- "log component verbosity is not a BSON object: " <<
- newValueElement);
- }
- return _set(newValueElement.Obj());
- }
+ errmsg = status.reason();
+ result.append("code", status.code());
+ return false;
+ }
- virtual Status setFromString(const std::string& str) {
- try {
- return _set(mongo::fromjson(str));
- }
- catch (const DBException& ex) {
- return ex.toStatus();
- }
- }
+ if (numSet == 0 && !found) {
+ errmsg = "no option found to set, use help:true to see options ";
+ return false;
+ }
- private:
- /**
- * Returns current settings as a BSON document.
- * The "default" log component is an implementation detail. Don't expose this to users.
- */
- void _get(BSONObj* output) const {
- static const string defaultLogComponentName =
- LogComponent(LogComponent::kDefault).getShortName();
-
- mutablebson::Document doc;
-
- for (int i = 0; i < int(LogComponent::kNumLogComponents); ++i) {
- LogComponent component = static_cast<LogComponent::Value>(i);
-
- int severity = -1;
- if (globalLogDomain()->hasMinimumLogSeverity(component)) {
- severity = globalLogDomain()->getMinimumLogSeverity(component).toInt();
- }
-
- // Save LogComponent::kDefault LogSeverity at root
- if (component == LogComponent::kDefault) {
- doc.root().appendInt("verbosity", severity);
- continue;
- }
-
- mutablebson::Element element = doc.makeElementObject(component.getShortName());
- element.appendInt("verbosity", severity);
-
- mutablebson::Element parentElement = _getParentElement(doc, component);
- parentElement.pushBack(element);
- }
-
- BSONObj result = doc.getObject();
- output->swap(result);
- invariant(!output->hasField(defaultLogComponentName));
- }
+ return true;
+ }
+} cmdSet;
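// A minimal standalone sketch of the validate-then-apply pattern used by
// CmdSet::run() above. A plain std::map stands in for the registered
// ServerParameter map; unknown names and duplicates are rejected before any
// value is applied. In the real command an individual set() can still fail
// after earlier parameters were applied (see SERVER-8552).
#include <map>
#include <string>
#include <utility>
#include <vector>

bool setParametersSketch(std::map<std::string, int>& registry,
                         const std::vector<std::pair<std::string, int>>& args,
                         std::string* errmsg) {
    std::map<std::string, int> toSet;
    for (const auto& arg : args) {
        if (!registry.count(arg.first)) {
            *errmsg = "attempted to set unrecognized parameter [" + arg.first + "]";
            return false;
        }
        if (toSet.count(arg.first)) {
            *errmsg = "attempted to set parameter [" + arg.first + "] twice";
            return false;
        }
        toSet.insert(arg);
    }
    // Every name was validated above; now apply the whole batch.
    for (const auto& kv : toSet)
        registry[kv.first] = kv.second;
    return true;
}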
- /**
- * Updates component hierarchy log levels.
- *
- * BSON Format:
- * {
- * verbosity: 4, <-- maps to 'default' log component.
- * componentA: {
- * verbosity: 2, <-- sets componentA's log level to 2.
- * componentB: {
- * verbosity: 1, <-- sets componentA.componentB's log level to 1.
- * }
- * componentC: {
- * verbosity: -1, <-- clears componentA.componentC's log level so that
- * its final loglevel will be inherited from componentA.
- * }
- * },
- * componentD : 3 <-- sets componentD's log level to 3 (alternative to
- * subdocument with 'verbosity' field).
- * }
- *
- * For the default component, the log level is read from the top-level
- * "verbosity" field.
- * For non-default components, we look up the element using the component's
- * dotted name. If the "<dotted component name>" field is a number, the log
- * level will be read from the field's value.
- * Otherwise, we assume that the "<dotted component name>" field is an
- * object with a "verbosity" field that holds the log level for the component.
- * The more verbose format with the "verbosity" field is intended to support
- * setting of log levels of both parent and child log components in the same
- * BSON document.
- *
- * Ignore elements in BSON object that do not map to a log component's dotted
- * name.
- */
- Status _set(const BSONObj& bsonSettings) const {
- StatusWith< std::vector<LogComponentSetting> > parseStatus =
- parseLogComponentSettings(bsonSettings);
-
- if (!parseStatus.isOK()) {
- return parseStatus.getStatus();
- }
-
- std::vector<LogComponentSetting> settings = parseStatus.getValue();
- std::vector<LogComponentSetting>::iterator it = settings.begin();
- for (; it < settings.end(); ++it) {
- LogComponentSetting newSetting = *it;
-
- // Negative value means to clear log level of component.
- if (newSetting.level < 0) {
- globalLogDomain()->clearMinimumLoggedSeverity(newSetting.component);
- continue;
- }
- // Convert non-negative value to Log()/Debug(N).
- LogSeverity newSeverity = (newSetting.level > 0) ?
- LogSeverity::Debug(newSetting.level) : LogSeverity::Log();
- globalLogDomain()->setMinimumLoggedSeverity(newSetting.component,
- newSeverity);
- }
-
- return Status::OK();
- }
+namespace {
+using logger::globalLogDomain;
+using logger::LogComponent;
+using logger::LogComponentSetting;
+using logger::LogSeverity;
+using logger::parseLogComponentSettings;
- /**
- * Search document for element corresponding to log component's parent.
- */
- static mutablebson::Element _getParentElement(mutablebson::Document& doc,
- LogComponent component) {
- // Hide LogComponent::kDefault
- if (component == LogComponent::kDefault) {
- return doc.end();
- }
- LogComponent parentComponent = component.parent();
-
- // Attach LogComponent::kDefault children to root
- if (parentComponent == LogComponent::kDefault) {
- return doc.root();
- }
- mutablebson::Element grandParentElement = _getParentElement(doc, parentComponent);
- return grandParentElement.findFirstChildNamed(parentComponent.getShortName());
- }
- } logComponentVerbositySetting;
-
- } // namespace
-
- namespace {
- class SSLModeSetting : public ServerParameter {
- public:
- SSLModeSetting() : ServerParameter(ServerParameterSet::getGlobal(), "sslMode",
- false, // allowedToChangeAtStartup
- true // allowedToChangeAtRuntime
- ) {}
-
- std::string sslModeStr() {
- switch (sslGlobalParams.sslMode.load()) {
- case SSLParams::SSLMode_disabled:
- return "disabled";
- case SSLParams::SSLMode_allowSSL:
- return "allowSSL";
- case SSLParams::SSLMode_preferSSL:
- return "preferSSL";
- case SSLParams::SSLMode_requireSSL:
- return "requireSSL";
- default:
- return "undefined";
- }
- }
+class LogLevelSetting : public ServerParameter {
+public:
+ LogLevelSetting() : ServerParameter(ServerParameterSet::getGlobal(), "logLevel") {}
- virtual void append(
- OperationContext* txn, BSONObjBuilder& b, const std::string& name) {
- b << name << sslModeStr();
- }
+ virtual void append(OperationContext* txn, BSONObjBuilder& b, const std::string& name) {
+ b << name << globalLogDomain()->getMinimumLogSeverity().toInt();
+ }
- virtual Status set(const BSONElement& newValueElement) {
- try {
- return setFromString(newValueElement.String());
- }
- catch (MsgAssertionException msg) {
- return Status(ErrorCodes::BadValue, mongoutils::str::stream() <<
- "Invalid value for sslMode via setParameter command: "
- << newValueElement);
- }
+ virtual Status set(const BSONElement& newValueElement) {
+ int newValue;
+ if (!newValueElement.coerce(&newValue) || newValue < 0)
+ return Status(ErrorCodes::BadValue,
+ mongoutils::str::stream()
+ << "Invalid value for logLevel: " << newValueElement);
+ LogSeverity newSeverity =
+ (newValue > 0) ? LogSeverity::Debug(newValue) : LogSeverity::Log();
+ globalLogDomain()->setMinimumLoggedSeverity(newSeverity);
+ return Status::OK();
+ }
- }
+ virtual Status setFromString(const std::string& str) {
+ int newValue;
+ Status status = parseNumberFromString(str, &newValue);
+ if (!status.isOK())
+ return status;
+ if (newValue < 0)
+ return Status(ErrorCodes::BadValue,
+ mongoutils::str::stream() << "Invalid value for logLevel: " << newValue);
+ LogSeverity newSeverity =
+ (newValue > 0) ? LogSeverity::Debug(newValue) : LogSeverity::Log();
+ globalLogDomain()->setMinimumLoggedSeverity(newSeverity);
+ return Status::OK();
+ }
+} logLevelSetting;
- virtual Status setFromString(const std::string& str) {
-#ifndef MONGO_CONFIG_SSL
- return Status(ErrorCodes::IllegalOperation, mongoutils::str::stream() <<
- "Unable to set sslMode, SSL support is not compiled into server");
-#endif
- if (str != "disabled" && str != "allowSSL" &&
- str != "preferSSL" && str != "requireSSL") {
- return Status(ErrorCodes::BadValue, mongoutils::str::stream() <<
- "Invalid value for sslMode via setParameter command: "
- << str);
- }
-
- int oldMode = sslGlobalParams.sslMode.load();
- if (str == "preferSSL" && oldMode == SSLParams::SSLMode_allowSSL) {
- sslGlobalParams.sslMode.store(SSLParams::SSLMode_preferSSL);
- }
- else if (str == "requireSSL" && oldMode == SSLParams::SSLMode_preferSSL) {
- sslGlobalParams.sslMode.store(SSLParams::SSLMode_requireSSL);
- }
- else {
- return Status(ErrorCodes::BadValue, mongoutils::str::stream() <<
- "Illegal state transition for sslMode, attempt to change from "
- << sslModeStr() << " to " << str);
- }
- return Status::OK();
- }
- } sslModeSetting;
-
- class ClusterAuthModeSetting : public ServerParameter {
- public:
- ClusterAuthModeSetting() :
- ServerParameter(ServerParameterSet::getGlobal(), "clusterAuthMode",
- false, // allowedToChangeAtStartup
- true // allowedToChangeAtRuntime
- ) {}
-
- std::string clusterAuthModeStr() {
- switch (serverGlobalParams.clusterAuthMode.load()) {
- case ServerGlobalParams::ClusterAuthMode_keyFile:
- return "keyFile";
- case ServerGlobalParams::ClusterAuthMode_sendKeyFile:
- return "sendKeyFile";
- case ServerGlobalParams::ClusterAuthMode_sendX509:
- return "sendX509";
- case ServerGlobalParams::ClusterAuthMode_x509:
- return "x509";
- default:
- return "undefined";
- }
+/**
+ * Log component verbosity.
+ * Controls the log levels of the log component hierarchy.
+ * A negative value for a log component means the default log level will be used.
+ */
+class LogComponentVerbositySetting : public ServerParameter {
+ MONGO_DISALLOW_COPYING(LogComponentVerbositySetting);
+
+public:
+ LogComponentVerbositySetting()
+ : ServerParameter(ServerParameterSet::getGlobal(), "logComponentVerbosity") {}
+
+ virtual void append(OperationContext* txn, BSONObjBuilder& b, const std::string& name) {
+ BSONObj currentSettings;
+ _get(&currentSettings);
+ b << name << currentSettings;
+ }
+
+ virtual Status set(const BSONElement& newValueElement) {
+ if (!newValueElement.isABSONObj()) {
+ return Status(ErrorCodes::TypeMismatch,
+ mongoutils::str::stream()
+ << "log component verbosity is not a BSON object: "
+ << newValueElement);
+ }
+ return _set(newValueElement.Obj());
+ }
+
+ virtual Status setFromString(const std::string& str) {
+ try {
+ return _set(mongo::fromjson(str));
+ } catch (const DBException& ex) {
+ return ex.toStatus();
+ }
+ }
+
+private:
+ /**
+ * Returns current settings as a BSON document.
+ * The "default" log component is an implementation detail. Don't expose this to users.
+ */
+ void _get(BSONObj* output) const {
+ static const string defaultLogComponentName =
+ LogComponent(LogComponent::kDefault).getShortName();
+
+ mutablebson::Document doc;
+
+ for (int i = 0; i < int(LogComponent::kNumLogComponents); ++i) {
+ LogComponent component = static_cast<LogComponent::Value>(i);
+
+ int severity = -1;
+ if (globalLogDomain()->hasMinimumLogSeverity(component)) {
+ severity = globalLogDomain()->getMinimumLogSeverity(component).toInt();
}
- virtual void append(
- OperationContext* txn, BSONObjBuilder& b, const std::string& name) {
- b << name << clusterAuthModeStr();
+ // Save LogComponent::kDefault LogSeverity at root
+ if (component == LogComponent::kDefault) {
+ doc.root().appendInt("verbosity", severity);
+ continue;
}
- virtual Status set(const BSONElement& newValueElement) {
- try {
- return setFromString(newValueElement.String());
- }
- catch (MsgAssertionException msg) {
- return Status(ErrorCodes::BadValue, mongoutils::str::stream() <<
- "Invalid value for clusterAuthMode via setParameter command: "
- << newValueElement);
- }
+ mutablebson::Element element = doc.makeElementObject(component.getShortName());
+ element.appendInt("verbosity", severity);
+
+ mutablebson::Element parentElement = _getParentElement(doc, component);
+ parentElement.pushBack(element);
+ }
+
+ BSONObj result = doc.getObject();
+ output->swap(result);
+ invariant(!output->hasField(defaultLogComponentName));
+ }
+
+ /**
+ * Updates component hierarchy log levels.
+ *
+ * BSON Format:
+ * {
+ * verbosity: 4, <-- maps to 'default' log component.
+ * componentA: {
+ * verbosity: 2, <-- sets componentA's log level to 2.
+ * componentB: {
+ * verbosity: 1, <-- sets componentA.componentB's log level to 1.
+ * }
+ * componentC: {
+ * verbosity: -1, <-- clears componentA.componentC's log level so that
+     *              its final log level will be inherited from componentA.
+ * }
+ * },
+ * componentD : 3 <-- sets componentD's log level to 3 (alternative to
+ * subdocument with 'verbosity' field).
+ * }
+ *
+ * For the default component, the log level is read from the top-level
+ * "verbosity" field.
+ * For non-default components, we look up the element using the component's
+ * dotted name. If the "<dotted component name>" field is a number, the log
+ * level will be read from the field's value.
+ * Otherwise, we assume that the "<dotted component name>" field is an
+ * object with a "verbosity" field that holds the log level for the component.
+ * The more verbose format with the "verbosity" field is intended to support
+ * setting of log levels of both parent and child log components in the same
+ * BSON document.
+ *
+     * Elements in the BSON object that do not map to a log component's dotted
+     * name are ignored.
+ */
+ Status _set(const BSONObj& bsonSettings) const {
+ StatusWith<std::vector<LogComponentSetting>> parseStatus =
+ parseLogComponentSettings(bsonSettings);
+
+ if (!parseStatus.isOK()) {
+ return parseStatus.getStatus();
+ }
+
+ std::vector<LogComponentSetting> settings = parseStatus.getValue();
+ std::vector<LogComponentSetting>::iterator it = settings.begin();
+ for (; it < settings.end(); ++it) {
+ LogComponentSetting newSetting = *it;
+ // Negative value means to clear log level of component.
+ if (newSetting.level < 0) {
+ globalLogDomain()->clearMinimumLoggedSeverity(newSetting.component);
+ continue;
}
+ // Convert non-negative value to Log()/Debug(N).
+ LogSeverity newSeverity =
+ (newSetting.level > 0) ? LogSeverity::Debug(newSetting.level) : LogSeverity::Log();
+ globalLogDomain()->setMinimumLoggedSeverity(newSetting.component, newSeverity);
+ }
+
+ return Status::OK();
+ }
+
+ /**
+ * Search document for element corresponding to log component's parent.
+ */
+ static mutablebson::Element _getParentElement(mutablebson::Document& doc,
+ LogComponent component) {
+ // Hide LogComponent::kDefault
+ if (component == LogComponent::kDefault) {
+ return doc.end();
+ }
+ LogComponent parentComponent = component.parent();
+
+ // Attach LogComponent::kDefault children to root
+ if (parentComponent == LogComponent::kDefault) {
+ return doc.root();
+ }
+ mutablebson::Element grandParentElement = _getParentElement(doc, parentComponent);
+ return grandParentElement.findFirstChildNamed(parentComponent.getShortName());
+ }
+} logComponentVerbositySetting;
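// A minimal standalone sketch of the inheritance rule documented above: a
// component whose level is unset, or cleared with a negative value, falls
// back to its parent, and ultimately to the default (root) component. The
// dotted names in the trailing comment are illustrative, not the real
// component list.
#include <map>
#include <string>

int effectiveVerbosity(const std::map<std::string, int>& settings,
                       std::string component) {
    for (;;) {
        std::map<std::string, int>::const_iterator it = settings.find(component);
        if (it != settings.end() && it->second >= 0)
            return it->second;  // explicit, non-cleared setting wins
        if (component.empty())
            return 0;  // nothing set anywhere: the Log() default
        std::string::size_type dot = component.rfind('.');
        component = (dot == std::string::npos) ? std::string() : component.substr(0, dot);
    }
}
// e.g. with {"": 1, "query": 2}, both "query" and "query.optimizer" resolve
// to 2, while "storage" inherits 1 from the root.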
+
+} // namespace
+
+namespace {
+class SSLModeSetting : public ServerParameter {
+public:
+ SSLModeSetting()
+ : ServerParameter(ServerParameterSet::getGlobal(),
+ "sslMode",
+ false, // allowedToChangeAtStartup
+ true // allowedToChangeAtRuntime
+ ) {}
+
+ std::string sslModeStr() {
+ switch (sslGlobalParams.sslMode.load()) {
+ case SSLParams::SSLMode_disabled:
+ return "disabled";
+ case SSLParams::SSLMode_allowSSL:
+ return "allowSSL";
+ case SSLParams::SSLMode_preferSSL:
+ return "preferSSL";
+ case SSLParams::SSLMode_requireSSL:
+ return "requireSSL";
+ default:
+ return "undefined";
+ }
+ }
+
+ virtual void append(OperationContext* txn, BSONObjBuilder& b, const std::string& name) {
+ b << name << sslModeStr();
+ }
+
+ virtual Status set(const BSONElement& newValueElement) {
+ try {
+ return setFromString(newValueElement.String());
+        } catch (const MsgAssertionException&) {
+ return Status(ErrorCodes::BadValue,
+ mongoutils::str::stream()
+ << "Invalid value for sslMode via setParameter command: "
+ << newValueElement);
+ }
+ }
- virtual Status setFromString(const std::string& str) {
+ virtual Status setFromString(const std::string& str) {
#ifndef MONGO_CONFIG_SSL
- return Status(ErrorCodes::IllegalOperation, mongoutils::str::stream() <<
- "Unable to set clusterAuthMode, " <<
- "SSL support is not compiled into server");
+ return Status(ErrorCodes::IllegalOperation,
+ mongoutils::str::stream()
+ << "Unable to set sslMode, SSL support is not compiled into server");
#endif
- if (str != "keyFile" && str != "sendKeyFile" &&
- str != "sendX509" && str != "x509") {
- return Status(ErrorCodes::BadValue, mongoutils::str::stream() <<
- "Invalid value for clusterAuthMode via setParameter command: "
- << str);
- }
-
- int oldMode = serverGlobalParams.clusterAuthMode.load();
- int sslMode = sslGlobalParams.sslMode.load();
- if (str == "sendX509" &&
- oldMode == ServerGlobalParams::ClusterAuthMode_sendKeyFile) {
- if (sslMode == SSLParams::SSLMode_disabled ||
- sslMode == SSLParams::SSLMode_allowSSL) {
- return Status(ErrorCodes::BadValue, mongoutils::str::stream() <<
- "Illegal state transition for clusterAuthMode, " <<
- "need to enable SSL for outgoing connections");
- }
- serverGlobalParams.clusterAuthMode.store
- (ServerGlobalParams::ClusterAuthMode_sendX509);
-#ifdef MONGO_CONFIG_SSL
- setInternalUserAuthParams(BSON(saslCommandMechanismFieldName <<
- "MONGODB-X509" <<
- saslCommandUserDBFieldName << "$external" <<
- saslCommandUserFieldName <<
- getSSLManager()->getSSLConfiguration()
- .clientSubjectName));
-#endif
- }
- else if (str == "x509" &&
- oldMode == ServerGlobalParams::ClusterAuthMode_sendX509) {
- serverGlobalParams.clusterAuthMode.store
- (ServerGlobalParams::ClusterAuthMode_x509);
- }
- else {
- return Status(ErrorCodes::BadValue, mongoutils::str::stream() <<
- "Illegal state transition for clusterAuthMode, change from "
- << clusterAuthModeStr() << " to " << str);
- }
- return Status::OK();
- }
- } clusterAuthModeSetting;
+ if (str != "disabled" && str != "allowSSL" && str != "preferSSL" && str != "requireSSL") {
+ return Status(ErrorCodes::BadValue,
+ mongoutils::str::stream()
+ << "Invalid value for sslMode via setParameter command: " << str);
+ }
- ExportedServerParameter<bool> QuietSetting( ServerParameterSet::getGlobal(),
- "quiet",
- &serverGlobalParams.quiet,
- true,
- true );
+ int oldMode = sslGlobalParams.sslMode.load();
+ if (str == "preferSSL" && oldMode == SSLParams::SSLMode_allowSSL) {
+ sslGlobalParams.sslMode.store(SSLParams::SSLMode_preferSSL);
+ } else if (str == "requireSSL" && oldMode == SSLParams::SSLMode_preferSSL) {
+ sslGlobalParams.sslMode.store(SSLParams::SSLMode_requireSSL);
+ } else {
+ return Status(ErrorCodes::BadValue,
+ mongoutils::str::stream()
+ << "Illegal state transition for sslMode, attempt to change from "
+ << sslModeStr() << " to " << str);
+ }
+ return Status::OK();
+ }
+} sslModeSetting;
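// A minimal standalone sketch of the transition rule setFromString() enforces
// above: at runtime sslMode may only be tightened one step, allowSSL ->
// preferSSL or preferSSL -> requireSSL; every other change is rejected as an
// illegal state transition.
enum SSLModeSketch { kDisabled, kAllowSSL, kPreferSSL, kRequireSSL };

bool trySSLModeTransition(SSLModeSketch* mode, SSLModeSketch wanted) {
    if (wanted == kPreferSSL && *mode == kAllowSSL) {
        *mode = kPreferSSL;
        return true;
    }
    if (wanted == kRequireSSL && *mode == kPreferSSL) {
        *mode = kRequireSSL;
        return true;
    }
    return false;  // e.g. disabled -> requireSSL, or any downgrade
}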
+
+class ClusterAuthModeSetting : public ServerParameter {
+public:
+ ClusterAuthModeSetting()
+ : ServerParameter(ServerParameterSet::getGlobal(),
+ "clusterAuthMode",
+ false, // allowedToChangeAtStartup
+ true // allowedToChangeAtRuntime
+ ) {}
+
+ std::string clusterAuthModeStr() {
+ switch (serverGlobalParams.clusterAuthMode.load()) {
+ case ServerGlobalParams::ClusterAuthMode_keyFile:
+ return "keyFile";
+ case ServerGlobalParams::ClusterAuthMode_sendKeyFile:
+ return "sendKeyFile";
+ case ServerGlobalParams::ClusterAuthMode_sendX509:
+ return "sendX509";
+ case ServerGlobalParams::ClusterAuthMode_x509:
+ return "x509";
+ default:
+ return "undefined";
+ }
+ }
- ExportedServerParameter<int> MaxConsecutiveFailedChecksSetting(
- ServerParameterSet::getGlobal(),
- "replMonitorMaxFailedChecks",
- &ReplicaSetMonitor::maxConsecutiveFailedChecks,
- false, // allowedToChangeAtStartup
- true); // allowedToChangeAtRuntime
+ virtual void append(OperationContext* txn, BSONObjBuilder& b, const std::string& name) {
+ b << name << clusterAuthModeStr();
+ }
- ExportedServerParameter<bool> TraceExceptionsSetting(ServerParameterSet::getGlobal(),
- "traceExceptions",
- &DBException::traceExceptions,
- false, // allowedToChangeAtStartup
- true); // allowedToChangeAtRuntime
+ virtual Status set(const BSONElement& newValueElement) {
+ try {
+ return setFromString(newValueElement.String());
+        } catch (const MsgAssertionException&) {
+ return Status(ErrorCodes::BadValue,
+ mongoutils::str::stream()
+ << "Invalid value for clusterAuthMode via setParameter command: "
+ << newValueElement);
+ }
+ }
+ virtual Status setFromString(const std::string& str) {
+#ifndef MONGO_CONFIG_SSL
+ return Status(ErrorCodes::IllegalOperation,
+ mongoutils::str::stream() << "Unable to set clusterAuthMode, "
+ << "SSL support is not compiled into server");
+#endif
+ if (str != "keyFile" && str != "sendKeyFile" && str != "sendX509" && str != "x509") {
+ return Status(ErrorCodes::BadValue,
+ mongoutils::str::stream()
+ << "Invalid value for clusterAuthMode via setParameter command: "
+ << str);
+ }
+ int oldMode = serverGlobalParams.clusterAuthMode.load();
+ int sslMode = sslGlobalParams.sslMode.load();
+ if (str == "sendX509" && oldMode == ServerGlobalParams::ClusterAuthMode_sendKeyFile) {
+ if (sslMode == SSLParams::SSLMode_disabled || sslMode == SSLParams::SSLMode_allowSSL) {
+ return Status(ErrorCodes::BadValue,
+ mongoutils::str::stream()
+ << "Illegal state transition for clusterAuthMode, "
+ << "need to enable SSL for outgoing connections");
+ }
+ serverGlobalParams.clusterAuthMode.store(ServerGlobalParams::ClusterAuthMode_sendX509);
+#ifdef MONGO_CONFIG_SSL
+ setInternalUserAuthParams(
+ BSON(saslCommandMechanismFieldName
+ << "MONGODB-X509" << saslCommandUserDBFieldName << "$external"
+ << saslCommandUserFieldName
+ << getSSLManager()->getSSLConfiguration().clientSubjectName));
+#endif
+ } else if (str == "x509" && oldMode == ServerGlobalParams::ClusterAuthMode_sendX509) {
+ serverGlobalParams.clusterAuthMode.store(ServerGlobalParams::ClusterAuthMode_x509);
+ } else {
+ return Status(ErrorCodes::BadValue,
+ mongoutils::str::stream()
+ << "Illegal state transition for clusterAuthMode, change from "
+ << clusterAuthModeStr() << " to " << str);
+ }
+ return Status::OK();
}
-
+} clusterAuthModeSetting;
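// The same one-way pattern, sketched for the cluster auth rollover above:
// only the sendKeyFile -> sendX509 and sendX509 -> x509 steps are permitted
// at runtime, and stepping into sendX509 additionally requires SSL on
// outgoing connections, mirroring the sslMode check in the real code.
enum ClusterAuthModeSketch { kKeyFile, kSendKeyFile, kSendX509, kX509 };

bool tryClusterAuthTransition(ClusterAuthModeSketch* mode,
                              ClusterAuthModeSketch wanted,
                              bool outgoingSSLEnabled) {
    if (wanted == kSendX509 && *mode == kSendKeyFile) {
        if (!outgoingSSLEnabled)
            return false;  // need to enable SSL for outgoing connections first
        *mode = kSendX509;
        return true;
    }
    if (wanted == kX509 && *mode == kSendX509) {
        *mode = kX509;
        return true;
    }
    return false;  // every other change is an illegal state transition
}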
+
+ExportedServerParameter<bool> QuietSetting(
+ ServerParameterSet::getGlobal(), "quiet", &serverGlobalParams.quiet, true, true);
+
+ExportedServerParameter<int> MaxConsecutiveFailedChecksSetting(
+ ServerParameterSet::getGlobal(),
+ "replMonitorMaxFailedChecks",
+ &ReplicaSetMonitor::maxConsecutiveFailedChecks,
+ false, // allowedToChangeAtStartup
+ true); // allowedToChangeAtRuntime
+
+ExportedServerParameter<bool> TraceExceptionsSetting(ServerParameterSet::getGlobal(),
+ "traceExceptions",
+ &DBException::traceExceptions,
+ false, // allowedToChangeAtStartup
+ true); // allowedToChangeAtRuntime
+}
}
-
diff --git a/src/mongo/db/commands/pipeline_command.cpp b/src/mongo/db/commands/pipeline_command.cpp
index 4f9274dc6b7..95423cf2e7b 100644
--- a/src/mongo/db/commands/pipeline_command.cpp
+++ b/src/mongo/db/commands/pipeline_command.cpp
@@ -54,295 +54,284 @@
namespace mongo {
- using boost::intrusive_ptr;
- using std::endl;
- using std::shared_ptr;
- using std::string;
- using std::stringstream;
- using std::unique_ptr;
-
- /**
- * Returns true if we need to keep a ClientCursor saved for this pipeline (for future getMore
- * requests). Otherwise, returns false.
- */
- static bool handleCursorCommand(OperationContext* txn,
- const string& ns,
- ClientCursorPin* pin,
- PlanExecutor* exec,
- const BSONObj& cmdObj,
- BSONObjBuilder& result) {
-
- ClientCursor* cursor = pin ? pin->c() : NULL;
- if (pin) {
- invariant(cursor);
- invariant(cursor->getExecutor() == exec);
- invariant(cursor->isAggCursor());
- }
+using boost::intrusive_ptr;
+using std::endl;
+using std::shared_ptr;
+using std::string;
+using std::stringstream;
+using std::unique_ptr;
- const long long defaultBatchSize = 101; // Same as query.
- long long batchSize;
- uassertStatusOK(Command::parseCommandCursorOptions(cmdObj, defaultBatchSize, &batchSize));
-
- // can't use result BSONObjBuilder directly since it won't handle exceptions correctly.
- BSONArrayBuilder resultsArray;
- const int byteLimit = MaxBytesToReturnToClientAtOnce;
- BSONObj next;
- for (int objCount = 0; objCount < batchSize; objCount++) {
- // The initial getNext() on a PipelineProxyStage may be very expensive so we don't
- // do it when batchSize is 0 since that indicates a desire for a fast return.
- if (exec->getNext(&next, NULL) != PlanExecutor::ADVANCED) {
- // make it an obvious error to use cursor or executor after this point
- cursor = NULL;
- exec = NULL;
- break;
- }
+/**
+ * Returns true if we need to keep a ClientCursor saved for this pipeline (for future getMore
+ * requests). Otherwise, returns false.
+ */
+static bool handleCursorCommand(OperationContext* txn,
+ const string& ns,
+ ClientCursorPin* pin,
+ PlanExecutor* exec,
+ const BSONObj& cmdObj,
+ BSONObjBuilder& result) {
+ ClientCursor* cursor = pin ? pin->c() : NULL;
+ if (pin) {
+ invariant(cursor);
+ invariant(cursor->getExecutor() == exec);
+ invariant(cursor->isAggCursor());
+ }
- if (resultsArray.len() + next.objsize() > byteLimit) {
- // Get the pipeline proxy stage wrapped by this PlanExecutor.
- PipelineProxyStage* proxy = static_cast<PipelineProxyStage*>(exec->getRootStage());
- // too big. next will be the first doc in the second batch
- proxy->pushBack(next);
- break;
- }
+ const long long defaultBatchSize = 101; // Same as query.
+ long long batchSize;
+ uassertStatusOK(Command::parseCommandCursorOptions(cmdObj, defaultBatchSize, &batchSize));
+
+ // can't use result BSONObjBuilder directly since it won't handle exceptions correctly.
+ BSONArrayBuilder resultsArray;
+ const int byteLimit = MaxBytesToReturnToClientAtOnce;
+ BSONObj next;
+ for (int objCount = 0; objCount < batchSize; objCount++) {
+ // The initial getNext() on a PipelineProxyStage may be very expensive so we don't
+ // do it when batchSize is 0 since that indicates a desire for a fast return.
+ if (exec->getNext(&next, NULL) != PlanExecutor::ADVANCED) {
+ // make it an obvious error to use cursor or executor after this point
+ cursor = NULL;
+ exec = NULL;
+ break;
+ }
- resultsArray.append(next);
+ if (resultsArray.len() + next.objsize() > byteLimit) {
+ // Get the pipeline proxy stage wrapped by this PlanExecutor.
+ PipelineProxyStage* proxy = static_cast<PipelineProxyStage*>(exec->getRootStage());
+ // too big. next will be the first doc in the second batch
+ proxy->pushBack(next);
+ break;
}
- // NOTE: exec->isEOF() can have side effects such as writing by $out. However, it should
- // be relatively quick since if there was no pin then the input is empty. Also, this
- // violates the contract for batchSize==0. Sharding requires a cursor to be returned in that
- // case. This is ok for now however, since you can't have a sharded collection that doesn't
- // exist.
- const bool canReturnMoreBatches = pin;
- if (!canReturnMoreBatches && exec && !exec->isEOF()) {
- // msgasserting since this shouldn't be possible to trigger from today's aggregation
- // language. The wording assumes that the only reason pin would be null is if the
- // collection doesn't exist.
- msgasserted(17391, str::stream()
- << "Aggregation has more results than fit in initial batch, but can't "
- << "create cursor since collection " << ns << " doesn't exist");
+ resultsArray.append(next);
+ }
+
+ // NOTE: exec->isEOF() can have side effects such as writing by $out. However, it should
+ // be relatively quick since if there was no pin then the input is empty. Also, this
+ // violates the contract for batchSize==0. Sharding requires a cursor to be returned in that
+    // case. This is OK for now, however, since you can't have a sharded collection that doesn't
+ // exist.
+ const bool canReturnMoreBatches = pin;
+ if (!canReturnMoreBatches && exec && !exec->isEOF()) {
+ // msgasserting since this shouldn't be possible to trigger from today's aggregation
+ // language. The wording assumes that the only reason pin would be null is if the
+ // collection doesn't exist.
+ msgasserted(
+ 17391,
+ str::stream() << "Aggregation has more results than fit in initial batch, but can't "
+ << "create cursor since collection " << ns << " doesn't exist");
+ }
+
+ if (cursor) {
+ // If a time limit was set on the pipeline, remaining time is "rolled over" to the
+ // cursor (for use by future getmore ops).
+ cursor->setLeftoverMaxTimeMicros(CurOp::get(txn)->getRemainingMaxTimeMicros());
+
+ CurOp::get(txn)->debug().cursorid = cursor->cursorid();
+
+ if (txn->getClient()->isInDirectClient()) {
+ cursor->setUnownedRecoveryUnit(txn->recoveryUnit());
+ } else {
+ // We stash away the RecoveryUnit in the ClientCursor. It's used for subsequent
+ // getMore requests. The calling OpCtx gets a fresh RecoveryUnit.
+ txn->recoveryUnit()->abandonSnapshot();
+ cursor->setOwnedRecoveryUnit(txn->releaseRecoveryUnit());
+ StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
+ invariant(txn->setRecoveryUnit(storageEngine->newRecoveryUnit(),
+ OperationContext::kNotInUnitOfWork) ==
+ OperationContext::kNotInUnitOfWork);
}
- if (cursor) {
- // If a time limit was set on the pipeline, remaining time is "rolled over" to the
- // cursor (for use by future getmore ops).
- cursor->setLeftoverMaxTimeMicros( CurOp::get(txn)->getRemainingMaxTimeMicros() );
+ // Cursor needs to be in a saved state while we yield locks for getmore. State
+ // will be restored in getMore().
+ exec->saveState();
+ }
- CurOp::get(txn)->debug().cursorid = cursor->cursorid();
+ const long long cursorId = cursor ? cursor->cursorid() : 0LL;
+ appendCursorResponseObject(cursorId, ns, resultsArray.arr(), &result);
- if (txn->getClient()->isInDirectClient()) {
- cursor->setUnownedRecoveryUnit(txn->recoveryUnit());
- }
- else {
- // We stash away the RecoveryUnit in the ClientCursor. It's used for subsequent
- // getMore requests. The calling OpCtx gets a fresh RecoveryUnit.
- txn->recoveryUnit()->abandonSnapshot();
- cursor->setOwnedRecoveryUnit(txn->releaseRecoveryUnit());
- StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
- invariant(txn->setRecoveryUnit(storageEngine->newRecoveryUnit(),
- OperationContext::kNotInUnitOfWork)
- == OperationContext::kNotInUnitOfWork);
- }
+ return static_cast<bool>(cursor);
+}
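// A minimal standalone sketch of the batching rule above: append documents
// until the next one would push the reply past the byte limit, then hand the
// overflow document back to the producer so it opens the next getMore batch
// (what pushBack() does on the PipelineProxyStage in the real code).
#include <cstddef>
#include <deque>
#include <string>
#include <utility>
#include <vector>

std::vector<std::string> firstBatch(std::deque<std::string>& source,
                                    std::size_t batchSize,
                                    std::size_t byteLimit) {
    std::vector<std::string> batch;
    std::size_t bytes = 0;
    while (batch.size() < batchSize && !source.empty()) {
        std::string next = source.front();
        source.pop_front();
        if (bytes + next.size() > byteLimit) {
            source.push_front(next);  // too big: keep it for the next batch
            break;
        }
        bytes += next.size();
        batch.push_back(std::move(next));
    }
    return batch;
}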
- // Cursor needs to be in a saved state while we yield locks for getmore. State
- // will be restored in getMore().
- exec->saveState();
- }
- const long long cursorId = cursor ? cursor->cursorid() : 0LL;
- appendCursorResponseObject(cursorId, ns, resultsArray.arr(), &result);
+class PipelineCommand : public Command {
+public:
+ PipelineCommand() : Command(Pipeline::commandName) {} // command is called "aggregate"
- return static_cast<bool>(cursor);
+ // Locks are managed manually, in particular by DocumentSourceCursor.
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual bool slaveOk() const {
+ return false;
+ }
+ virtual bool slaveOverrideOk() const {
+ return true;
+ }
+ virtual void help(stringstream& help) const {
+ help << "{ pipeline: [ { $operator: {...}}, ... ]"
+ << ", explain: <bool>"
+ << ", allowDiskUse: <bool>"
+ << ", cursor: {batchSize: <number>}"
+ << " }" << endl
+ << "See http://dochub.mongodb.org/core/aggregation for more details.";
}
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ Pipeline::addRequiredPrivileges(this, dbname, cmdObj, out);
+ }
- class PipelineCommand :
- public Command {
- public:
- PipelineCommand() :Command(Pipeline::commandName) {} // command is called "aggregate"
-
- // Locks are managed manually, in particular by DocumentSourceCursor.
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual bool slaveOk() const { return false; }
- virtual bool slaveOverrideOk() const { return true; }
- virtual void help(stringstream &help) const {
- help << "{ pipeline: [ { $operator: {...}}, ... ]"
- << ", explain: <bool>"
- << ", allowDiskUse: <bool>"
- << ", cursor: {batchSize: <number>}"
- << " }"
- << endl
- << "See http://dochub.mongodb.org/core/aggregation for more details."
- ;
+ virtual bool run(OperationContext* txn,
+ const string& db,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ const std::string ns = parseNs(db, cmdObj);
+ if (nsToCollectionSubstring(ns).empty()) {
+ errmsg = "missing collection name";
+ return false;
}
-
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- Pipeline::addRequiredPrivileges(this, dbname, cmdObj, out);
+ NamespaceString nss(ns);
+
+ intrusive_ptr<ExpressionContext> pCtx = new ExpressionContext(txn, nss);
+ pCtx->tempDir = storageGlobalParams.dbpath + "/_tmp";
+
+ /* try to parse the command; if this fails, then we didn't run */
+ intrusive_ptr<Pipeline> pPipeline = Pipeline::parseCommand(errmsg, cmdObj, pCtx);
+ if (!pPipeline.get())
+ return false;
+
+ // This is outside of the if block to keep the object alive until the pipeline is finished.
+ BSONObj parsed;
+ if (kDebugBuild && !pPipeline->isExplain() && !pCtx->inShard) {
+ // Make sure all operations round-trip through Pipeline::toBson() correctly by
+ // reparsing every command in debug builds. This is important because sharded
+ // aggregations rely on this ability. Skipping when inShard because this has
+ // already been through the transformation (and this unsets pCtx->inShard).
+ parsed = pPipeline->serialize().toBson();
+ pPipeline = Pipeline::parseCommand(errmsg, parsed, pCtx);
+ verify(pPipeline);
}
- virtual bool run(OperationContext* txn,
- const string &db,
- BSONObj &cmdObj,
- int options,
- string &errmsg,
- BSONObjBuilder &result) {
- const std::string ns = parseNs(db, cmdObj);
- if (nsToCollectionSubstring(ns).empty()) {
- errmsg = "missing collection name";
- return false;
+ PlanExecutor* exec = NULL;
+ unique_ptr<ClientCursorPin> pin; // either this OR the execHolder will be non-null
+ unique_ptr<PlanExecutor> execHolder;
+ {
+ // This will throw if the sharding version for this connection is out of date. The
+ // lock must be held continuously from now until we have we created both the output
+ // ClientCursor and the input executor. This ensures that both are using the same
+ // sharding version that we synchronize on here. This is also why we always need to
+ // create a ClientCursor even when we aren't outputting to a cursor. See the comment
+ // on ShardFilterStage for more details.
+ AutoGetCollectionForRead ctx(txn, nss.ns());
+
+ Collection* collection = ctx.getCollection();
+
+ // This does mongod-specific stuff like creating the input PlanExecutor and adding
+ // it to the front of the pipeline if needed.
+ std::shared_ptr<PlanExecutor> input =
+ PipelineD::prepareCursorSource(txn, collection, pPipeline, pCtx);
+ pPipeline->stitch();
+
+ // Create the PlanExecutor which returns results from the pipeline. The WorkingSet
+ // ('ws') and the PipelineProxyStage ('proxy') will be owned by the created
+ // PlanExecutor.
+ unique_ptr<WorkingSet> ws(new WorkingSet());
+ unique_ptr<PipelineProxyStage> proxy(
+ new PipelineProxyStage(pPipeline, input, ws.get()));
+ Status execStatus = Status::OK();
+ if (NULL == collection) {
+ execStatus = PlanExecutor::make(txn,
+ ws.release(),
+ proxy.release(),
+ nss.ns(),
+ PlanExecutor::YIELD_MANUAL,
+ &exec);
+ } else {
+ execStatus = PlanExecutor::make(txn,
+ ws.release(),
+ proxy.release(),
+ collection,
+ PlanExecutor::YIELD_MANUAL,
+ &exec);
}
- NamespaceString nss(ns);
-
- intrusive_ptr<ExpressionContext> pCtx = new ExpressionContext(txn, nss);
- pCtx->tempDir = storageGlobalParams.dbpath + "/_tmp";
-
- /* try to parse the command; if this fails, then we didn't run */
- intrusive_ptr<Pipeline> pPipeline = Pipeline::parseCommand(errmsg, cmdObj, pCtx);
- if (!pPipeline.get())
- return false;
-
- // This is outside of the if block to keep the object alive until the pipeline is finished.
- BSONObj parsed;
- if (kDebugBuild && !pPipeline->isExplain() && !pCtx->inShard) {
- // Make sure all operations round-trip through Pipeline::toBson() correctly by
- // reparsing every command in debug builds. This is important because sharded
- // aggregations rely on this ability. Skipping when inShard because this has
- // already been through the transformation (and this unsets pCtx->inShard).
- parsed = pPipeline->serialize().toBson();
- pPipeline = Pipeline::parseCommand(errmsg, parsed, pCtx);
- verify(pPipeline);
+ invariant(execStatus.isOK());
+ execHolder.reset(exec);
+
+ if (!collection && input) {
+ // If we don't have a collection, we won't be able to register any executors, so
+ // make sure that the input PlanExecutor (likely wrapping an EOFStage) doesn't
+ // need to be registered.
+ invariant(!input->collection());
}
- PlanExecutor* exec = NULL;
- unique_ptr<ClientCursorPin> pin; // either this OR the execHolder will be non-null
- unique_ptr<PlanExecutor> execHolder;
- {
- // This will throw if the sharding version for this connection is out of date. The
- // lock must be held continuously from now until we have we created both the output
- // ClientCursor and the input executor. This ensures that both are using the same
- // sharding version that we synchronize on here. This is also why we always need to
- // create a ClientCursor even when we aren't outputting to a cursor. See the comment
- // on ShardFilterStage for more details.
- AutoGetCollectionForRead ctx(txn, nss.ns());
-
- Collection* collection = ctx.getCollection();
-
- // This does mongod-specific stuff like creating the input PlanExecutor and adding
- // it to the front of the pipeline if needed.
- std::shared_ptr<PlanExecutor> input = PipelineD::prepareCursorSource(txn,
- collection,
- pPipeline,
- pCtx);
- pPipeline->stitch();
-
- // Create the PlanExecutor which returns results from the pipeline. The WorkingSet
- // ('ws') and the PipelineProxyStage ('proxy') will be owned by the created
- // PlanExecutor.
- unique_ptr<WorkingSet> ws(new WorkingSet());
- unique_ptr<PipelineProxyStage> proxy(
- new PipelineProxyStage(pPipeline, input, ws.get()));
- Status execStatus = Status::OK();
- if (NULL == collection) {
- execStatus = PlanExecutor::make(txn,
- ws.release(),
- proxy.release(),
- nss.ns(),
- PlanExecutor::YIELD_MANUAL,
- &exec);
- }
- else {
- execStatus = PlanExecutor::make(txn,
- ws.release(),
- proxy.release(),
- collection,
- PlanExecutor::YIELD_MANUAL,
- &exec);
- }
- invariant(execStatus.isOK());
- execHolder.reset(exec);
-
- if (!collection && input) {
- // If we don't have a collection, we won't be able to register any executors, so
- // make sure that the input PlanExecutor (likely wrapping an EOFStage) doesn't
- // need to be registered.
- invariant(!input->collection());
- }
-
- if (collection) {
- const bool isAggCursor = true; // enable special locking behavior
- ClientCursor* cursor = new ClientCursor(collection->getCursorManager(),
- execHolder.release(),
- nss.ns(),
- 0,
- cmdObj.getOwned(),
- isAggCursor);
- pin.reset(new ClientCursorPin(collection->getCursorManager(),
- cursor->cursorid()));
- // Don't add any code between here and the start of the try block.
- }
-
- // At this point, it is safe to release the collection lock.
- // - In the case where we have a collection: we will need to reacquire the
- // collection lock later when cleaning up our ClientCursorPin.
- // - In the case where we don't have a collection: our PlanExecutor won't be
- // registered, so it will be safe to clean it up outside the lock.
- invariant(NULL == execHolder.get() || NULL == execHolder->collection());
+ if (collection) {
+ const bool isAggCursor = true; // enable special locking behavior
+ ClientCursor* cursor = new ClientCursor(collection->getCursorManager(),
+ execHolder.release(),
+ nss.ns(),
+ 0,
+ cmdObj.getOwned(),
+ isAggCursor);
+ pin.reset(new ClientCursorPin(collection->getCursorManager(), cursor->cursorid()));
+ // Don't add any code between here and the start of the try block.
}
- try {
- // Unless set to true, the ClientCursor created above will be deleted on block exit.
- bool keepCursor = false;
+ // At this point, it is safe to release the collection lock.
+ // - In the case where we have a collection: we will need to reacquire the
+ // collection lock later when cleaning up our ClientCursorPin.
+ // - In the case where we don't have a collection: our PlanExecutor won't be
+ // registered, so it will be safe to clean it up outside the lock.
+ invariant(NULL == execHolder.get() || NULL == execHolder->collection());
+ }
- const bool isCursorCommand = !cmdObj["cursor"].eoo();
+ try {
+ // Unless set to true, the ClientCursor created above will be deleted on block exit.
+ bool keepCursor = false;
- // If both explain and cursor are specified, explain wins.
- if (pPipeline->isExplain()) {
- result << "stages" << Value(pPipeline->writeExplainOps());
- }
- else if (isCursorCommand) {
- keepCursor = handleCursorCommand(txn,
- nss.ns(),
- pin.get(),
- exec,
- cmdObj,
- result);
- }
- else {
- pPipeline->run(result);
- }
+ const bool isCursorCommand = !cmdObj["cursor"].eoo();
- // Clean up our ClientCursorPin, if needed. We must reacquire the collection lock
- // in order to do so.
- if (pin) {
- // We acquire locks here with DBLock and CollectionLock instead of using
- // AutoGetCollectionForRead. AutoGetCollectionForRead will throw if the
- // sharding version is out of date, and we don't care if the sharding version
- // has changed.
- Lock::DBLock dbLock(txn->lockState(), nss.db(), MODE_IS);
- Lock::CollectionLock collLock(txn->lockState(), nss.ns(), MODE_IS);
- if (keepCursor) {
- pin->release();
- }
- else {
- pin->deleteUnderlying();
- }
- }
+ // If both explain and cursor are specified, explain wins.
+ if (pPipeline->isExplain()) {
+ result << "stages" << Value(pPipeline->writeExplainOps());
+ } else if (isCursorCommand) {
+ keepCursor = handleCursorCommand(txn, nss.ns(), pin.get(), exec, cmdObj, result);
+ } else {
+ pPipeline->run(result);
}
- catch (...) {
- // On our way out of scope, we clean up our ClientCursorPin if needed.
- if (pin) {
- Lock::DBLock dbLock(txn->lockState(), nss.db(), MODE_IS);
- Lock::CollectionLock collLock(txn->lockState(), nss.ns(), MODE_IS);
+
+ // Clean up our ClientCursorPin, if needed. We must reacquire the collection lock
+ // in order to do so.
+ if (pin) {
+ // We acquire locks here with DBLock and CollectionLock instead of using
+ // AutoGetCollectionForRead. AutoGetCollectionForRead will throw if the
+ // sharding version is out of date, and we don't care if the sharding version
+ // has changed.
+ Lock::DBLock dbLock(txn->lockState(), nss.db(), MODE_IS);
+ Lock::CollectionLock collLock(txn->lockState(), nss.ns(), MODE_IS);
+ if (keepCursor) {
+ pin->release();
+ } else {
pin->deleteUnderlying();
}
- throw;
}
- // Any code that needs the cursor pinned must be inside the try block, above.
-
- return true;
+ } catch (...) {
+ // On our way out of scope, we clean up our ClientCursorPin if needed.
+ if (pin) {
+ Lock::DBLock dbLock(txn->lockState(), nss.db(), MODE_IS);
+ Lock::CollectionLock collLock(txn->lockState(), nss.ns(), MODE_IS);
+ pin->deleteUnderlying();
+ }
+ throw;
}
- } cmdPipeline;
+ // Any code that needs the cursor pinned must be inside the try block, above.
+
+ return true;
+ }
+} cmdPipeline;
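// A minimal standalone sketch of the debug-build round-trip check in run()
// above: serialize the parsed form, reparse it, and assert nothing is lost.
// A toy integer parser stands in for Pipeline::parseCommand() and
// serialize(); like the verify() above, the assert is compiled out of
// release builds.
#include <cassert>
#include <cstdlib>
#include <string>

struct ParsedSketch {
    int value;
    bool ok;
};

ParsedSketch parseSketch(const std::string& s) {
    char* end = nullptr;
    long v = std::strtol(s.c_str(), &end, 10);
    return ParsedSketch{static_cast<int>(v), end != s.c_str() && *end == '\0'};
}

void debugRoundTripCheck(const std::string& input) {
    ParsedSketch first = parseSketch(input);
    if (!first.ok)
        return;  // parse failed; the command would not have run
    ParsedSketch second = parseSketch(std::to_string(first.value));
    assert(second.ok && second.value == first.value);
}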
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/commands/plan_cache_commands.cpp b/src/mongo/db/commands/plan_cache_commands.cpp
index 3fc5d8f313e..1fc0b40493c 100644
--- a/src/mongo/db/commands/plan_cache_commands.cpp
+++ b/src/mongo/db/commands/plan_cache_commands.cpp
@@ -48,399 +48,401 @@
namespace {
- using std::string;
- using std::unique_ptr;
- using namespace mongo;
-
- /**
- * Utility function to extract error code and message from status
- * and append to BSON results.
- */
- void addStatus(const Status& status, BSONObjBuilder& builder) {
- builder.append("ok", status.isOK() ? 1.0 : 0.0);
- if (!status.isOK()) {
- builder.append("code", status.code());
- }
- if (!status.reason().empty()) {
- builder.append("errmsg", status.reason());
- }
- }
+using std::string;
+using std::unique_ptr;
+using namespace mongo;
- /**
- * Retrieves a collection's plan cache from the database.
- */
- static Status getPlanCache(OperationContext* txn,
- Collection* collection,
- const string& ns,
- PlanCache** planCacheOut) {
- *planCacheOut = NULL;
-
- if (NULL == collection) {
- return Status(ErrorCodes::BadValue, "no such collection");
- }
+/**
+ * Utility function to extract error code and message from status
+ * and append to BSON results.
+ */
+void addStatus(const Status& status, BSONObjBuilder& builder) {
+ builder.append("ok", status.isOK() ? 1.0 : 0.0);
+ if (!status.isOK()) {
+ builder.append("code", status.code());
+ }
+ if (!status.reason().empty()) {
+ builder.append("errmsg", status.reason());
+ }
+}
- CollectionInfoCache* infoCache = collection->infoCache();
- invariant(infoCache);
+/**
+ * Retrieves a collection's plan cache from the database.
+ */
+static Status getPlanCache(OperationContext* txn,
+ Collection* collection,
+ const string& ns,
+ PlanCache** planCacheOut) {
+ *planCacheOut = NULL;
+
+ if (NULL == collection) {
+ return Status(ErrorCodes::BadValue, "no such collection");
+ }
- PlanCache* planCache = infoCache->getPlanCache();
- invariant(planCache);
+ CollectionInfoCache* infoCache = collection->infoCache();
+ invariant(infoCache);
- *planCacheOut = planCache;
- return Status::OK();
- }
+ PlanCache* planCache = infoCache->getPlanCache();
+ invariant(planCache);
- //
- // Command instances.
- // Registers commands with the command system and make commands
- // available to the client.
- //
+ *planCacheOut = planCache;
+ return Status::OK();
+}
- MONGO_INITIALIZER_WITH_PREREQUISITES(SetupPlanCacheCommands, MONGO_NO_PREREQUISITES)(
- InitializerContext* context) {
+//
+// Command instances.
+// Registers commands with the command system and makes commands
+// available to the client.
+//
- // PlanCacheCommand constructors refer to static ActionType instances.
- // Registering commands in a mongo static initializer ensures that
- // the ActionType construction will be completed first.
- new PlanCacheListQueryShapes();
- new PlanCacheClear();
- new PlanCacheListPlans();
+MONGO_INITIALIZER_WITH_PREREQUISITES(SetupPlanCacheCommands,
+ MONGO_NO_PREREQUISITES)(InitializerContext* context) {
+ // PlanCacheCommand constructors refer to static ActionType instances.
+ // Registering commands in a mongo static initializer ensures that
+ // the ActionType construction will be completed first.
+ new PlanCacheListQueryShapes();
+ new PlanCacheClear();
+ new PlanCacheListPlans();
- return Status::OK();
- }
+ return Status::OK();
+}
-} // namespace
+} // namespace
namespace mongo {
- using std::string;
- using std::stringstream;
- using std::vector;
- using std::unique_ptr;
+using std::string;
+using std::stringstream;
+using std::vector;
+using std::unique_ptr;
- PlanCacheCommand::PlanCacheCommand(const string& name, const string& helpText,
- ActionType actionType)
- : Command(name),
- helpText(helpText),
- actionType(actionType) { }
+PlanCacheCommand::PlanCacheCommand(const string& name,
+ const string& helpText,
+ ActionType actionType)
+ : Command(name), helpText(helpText), actionType(actionType) {}
- bool PlanCacheCommand::run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
- string ns = parseNs(dbname, cmdObj);
+bool PlanCacheCommand::run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ string ns = parseNs(dbname, cmdObj);
- Status status = runPlanCacheCommand(txn, ns, cmdObj, &result);
+ Status status = runPlanCacheCommand(txn, ns, cmdObj, &result);
- if (!status.isOK()) {
- addStatus(status, result);
- return false;
- }
-
- return true;
+ if (!status.isOK()) {
+ addStatus(status, result);
+ return false;
}
- bool PlanCacheCommand::isWriteCommandForConfigServer() const { return false; }
+ return true;
+}
- bool PlanCacheCommand::slaveOk() const {
- return false;
- }
+bool PlanCacheCommand::isWriteCommandForConfigServer() const {
+ return false;
+}
- bool PlanCacheCommand::slaveOverrideOk() const {
- return true;
- }
+bool PlanCacheCommand::slaveOk() const {
+ return false;
+}
- void PlanCacheCommand::help(stringstream& ss) const {
- ss << helpText;
- }
+bool PlanCacheCommand::slaveOverrideOk() const {
+ return true;
+}
- Status PlanCacheCommand::checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- AuthorizationSession* authzSession = AuthorizationSession::get(client);
- ResourcePattern pattern = parseResourcePattern(dbname, cmdObj);
+void PlanCacheCommand::help(stringstream& ss) const {
+ ss << helpText;
+}
- if (authzSession->isAuthorizedForActionsOnResource(pattern, actionType)) {
- return Status::OK();
- }
+Status PlanCacheCommand::checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ AuthorizationSession* authzSession = AuthorizationSession::get(client);
+ ResourcePattern pattern = parseResourcePattern(dbname, cmdObj);
- return Status(ErrorCodes::Unauthorized, "unauthorized");
+ if (authzSession->isAuthorizedForActionsOnResource(pattern, actionType)) {
+ return Status::OK();
}
- // static
- Status PlanCacheCommand::canonicalize(OperationContext* txn,
- const string& ns,
- const BSONObj& cmdObj,
- CanonicalQuery** canonicalQueryOut) {
- // query - required
- BSONElement queryElt = cmdObj.getField("query");
- if (queryElt.eoo()) {
- return Status(ErrorCodes::BadValue, "required field query missing");
- }
- if (!queryElt.isABSONObj()) {
- return Status(ErrorCodes::BadValue, "required field query must be an object");
- }
- if (queryElt.eoo()) {
- return Status(ErrorCodes::BadValue, "required field query missing");
- }
- BSONObj queryObj = queryElt.Obj();
-
- // sort - optional
- BSONElement sortElt = cmdObj.getField("sort");
- BSONObj sortObj;
- if (!sortElt.eoo()) {
- if (!sortElt.isABSONObj()) {
- return Status(ErrorCodes::BadValue, "optional field sort must be an object");
- }
- sortObj = sortElt.Obj();
- }
-
- // projection - optional
- BSONElement projElt = cmdObj.getField("projection");
- BSONObj projObj;
- if (!projElt.eoo()) {
- if (!projElt.isABSONObj()) {
- return Status(ErrorCodes::BadValue, "optional field projection must be an object");
- }
- projObj = projElt.Obj();
+ return Status(ErrorCodes::Unauthorized, "unauthorized");
+}
+
+// static
+Status PlanCacheCommand::canonicalize(OperationContext* txn,
+ const string& ns,
+ const BSONObj& cmdObj,
+ CanonicalQuery** canonicalQueryOut) {
+ // query - required
+ BSONElement queryElt = cmdObj.getField("query");
+ if (queryElt.eoo()) {
+ return Status(ErrorCodes::BadValue, "required field query missing");
+ }
+ if (!queryElt.isABSONObj()) {
+ return Status(ErrorCodes::BadValue, "required field query must be an object");
+ }
+ BSONObj queryObj = queryElt.Obj();
+
+ // sort - optional
+ BSONElement sortElt = cmdObj.getField("sort");
+ BSONObj sortObj;
+ if (!sortElt.eoo()) {
+ if (!sortElt.isABSONObj()) {
+ return Status(ErrorCodes::BadValue, "optional field sort must be an object");
}
+ sortObj = sortElt.Obj();
+ }
- // Create canonical query
- CanonicalQuery* cqRaw;
-
- const NamespaceString nss(ns);
- const WhereCallbackReal whereCallback(txn, nss.db());
-
- Status result = CanonicalQuery::canonicalize(
- ns, queryObj, sortObj, projObj, &cqRaw, whereCallback);
- if (!result.isOK()) {
- return result;
+ // projection - optional
+ BSONElement projElt = cmdObj.getField("projection");
+ BSONObj projObj;
+ if (!projElt.eoo()) {
+ if (!projElt.isABSONObj()) {
+ return Status(ErrorCodes::BadValue, "optional field projection must be an object");
}
-
- *canonicalQueryOut = cqRaw;
- return Status::OK();
+ projObj = projElt.Obj();
}
- PlanCacheListQueryShapes::PlanCacheListQueryShapes() : PlanCacheCommand("planCacheListQueryShapes",
- "Displays all query shapes in a collection.",
- ActionType::planCacheRead) { }
+ // Create canonical query
+ CanonicalQuery* cqRaw;
- Status PlanCacheListQueryShapes::runPlanCacheCommand(OperationContext* txn,
- const string& ns,
- BSONObj& cmdObj,
- BSONObjBuilder* bob) {
- // This is a read lock. The query cache is owned by the collection.
- AutoGetCollectionForRead ctx(txn, ns);
+ const NamespaceString nss(ns);
+ const WhereCallbackReal whereCallback(txn, nss.db());
- PlanCache* planCache;
- Status status = getPlanCache(txn, ctx.getCollection(), ns, &planCache);
- if (!status.isOK()) {
- // No collection - return results with empty shapes array.
- BSONArrayBuilder arrayBuilder(bob->subarrayStart("shapes"));
- arrayBuilder.doneFast();
- return Status::OK();
- }
- return list(*planCache, bob);
+ Status result =
+ CanonicalQuery::canonicalize(ns, queryObj, sortObj, projObj, &cqRaw, whereCallback);
+ if (!result.isOK()) {
+ return result;
}
- // static
- Status PlanCacheListQueryShapes::list(const PlanCache& planCache, BSONObjBuilder* bob) {
- invariant(bob);
-
- // Fetch all cached solutions from plan cache.
- vector<PlanCacheEntry*> solutions = planCache.getAllEntries();
-
+ *canonicalQueryOut = cqRaw;
+ return Status::OK();
+}
+
+PlanCacheListQueryShapes::PlanCacheListQueryShapes()
+ : PlanCacheCommand("planCacheListQueryShapes",
+ "Displays all query shapes in a collection.",
+ ActionType::planCacheRead) {}
+
+Status PlanCacheListQueryShapes::runPlanCacheCommand(OperationContext* txn,
+ const string& ns,
+ BSONObj& cmdObj,
+ BSONObjBuilder* bob) {
+ // This is a read lock. The query cache is owned by the collection.
+ AutoGetCollectionForRead ctx(txn, ns);
+
+ PlanCache* planCache;
+ Status status = getPlanCache(txn, ctx.getCollection(), ns, &planCache);
+ if (!status.isOK()) {
+ // No collection - return results with empty shapes array.
BSONArrayBuilder arrayBuilder(bob->subarrayStart("shapes"));
- for (vector<PlanCacheEntry*>::const_iterator i = solutions.begin(); i != solutions.end(); i++) {
- PlanCacheEntry* entry = *i;
- invariant(entry);
-
- BSONObjBuilder shapeBuilder(arrayBuilder.subobjStart());
- shapeBuilder.append("query", entry->query);
- shapeBuilder.append("sort", entry->sort);
- shapeBuilder.append("projection", entry->projection);
- shapeBuilder.doneFast();
-
- // Release resources for cached solution after extracting query shape.
- delete entry;
- }
arrayBuilder.doneFast();
-
return Status::OK();
}
+ return list(*planCache, bob);
+}
- PlanCacheClear::PlanCacheClear() : PlanCacheCommand("planCacheClear",
- "Drops one or all cached queries in a collection.",
- ActionType::planCacheWrite) { }
-
- Status PlanCacheClear::runPlanCacheCommand(OperationContext* txn,
- const std::string& ns,
- BSONObj& cmdObj,
- BSONObjBuilder* bob) {
- // This is a read lock. The query cache is owned by the collection.
- AutoGetCollectionForRead ctx(txn, ns);
+// static
+Status PlanCacheListQueryShapes::list(const PlanCache& planCache, BSONObjBuilder* bob) {
+ invariant(bob);
- PlanCache* planCache;
- Status status = getPlanCache(txn, ctx.getCollection(), ns, &planCache);
- if (!status.isOK()) {
- // No collection - nothing to do. Return OK status.
- return Status::OK();
- }
- return clear(txn, planCache, ns, cmdObj);
- }
+ // Fetch all cached solutions from plan cache.
+ vector<PlanCacheEntry*> solutions = planCache.getAllEntries();
- // static
- Status PlanCacheClear::clear(OperationContext* txn,
- PlanCache* planCache,
- const string& ns,
- const BSONObj& cmdObj) {
- invariant(planCache);
-
- // According to the specification, the planCacheClear command runs in two modes:
- // - clear all query shapes; or
- // - clear plans for single query shape when a query shape is described in the
- // command arguments.
- if (cmdObj.hasField("query")) {
- CanonicalQuery* cqRaw;
- Status status = PlanCacheCommand::canonicalize(txn, ns, cmdObj, &cqRaw);
- if (!status.isOK()) {
- return status;
- }
+ BSONArrayBuilder arrayBuilder(bob->subarrayStart("shapes"));
+ for (vector<PlanCacheEntry*>::const_iterator i = solutions.begin(); i != solutions.end(); i++) {
+ PlanCacheEntry* entry = *i;
+ invariant(entry);
- unique_ptr<CanonicalQuery> cq(cqRaw);
+ BSONObjBuilder shapeBuilder(arrayBuilder.subobjStart());
+ shapeBuilder.append("query", entry->query);
+ shapeBuilder.append("sort", entry->sort);
+ shapeBuilder.append("projection", entry->projection);
+ shapeBuilder.doneFast();
- if (!planCache->contains(*cq)) {
- // Log if asked to clear non-existent query shape.
- LOG(1) << ns << ": query shape doesn't exist in PlanCache - "
- << cq->getQueryObj().toString()
- << "(sort: " << cq->getParsed().getSort()
- << "; projection: " << cq->getParsed().getProj() << ")";
- return Status::OK();
- }
+ // Release resources for cached solution after extracting query shape.
+ delete entry;
+ }
+ arrayBuilder.doneFast();
+
+ return Status::OK();
+}
+
+PlanCacheClear::PlanCacheClear()
+ : PlanCacheCommand("planCacheClear",
+ "Drops one or all cached queries in a collection.",
+ ActionType::planCacheWrite) {}
+
+Status PlanCacheClear::runPlanCacheCommand(OperationContext* txn,
+ const std::string& ns,
+ BSONObj& cmdObj,
+ BSONObjBuilder* bob) {
+ // This is a read lock. The query cache is owned by the collection.
+ AutoGetCollectionForRead ctx(txn, ns);
+
+ PlanCache* planCache;
+ Status status = getPlanCache(txn, ctx.getCollection(), ns, &planCache);
+ if (!status.isOK()) {
+ // No collection - nothing to do. Return OK status.
+ return Status::OK();
+ }
+ return clear(txn, planCache, ns, cmdObj);
+}
+
+// static
+Status PlanCacheClear::clear(OperationContext* txn,
+ PlanCache* planCache,
+ const string& ns,
+ const BSONObj& cmdObj) {
+ invariant(planCache);
+
+ // According to the specification, the planCacheClear command runs in two modes:
+ // - clear all query shapes; or
+    // - clear plans for a single query shape when a query shape is described in the
+ // command arguments.
+ if (cmdObj.hasField("query")) {
+ CanonicalQuery* cqRaw;
+ Status status = PlanCacheCommand::canonicalize(txn, ns, cmdObj, &cqRaw);
+ if (!status.isOK()) {
+ return status;
+ }
- Status result = planCache->remove(*cq);
- if (!result.isOK()) {
- return result;
- }
+ unique_ptr<CanonicalQuery> cq(cqRaw);
- LOG(1) << ns << ": removed plan cache entry - " << cq->getQueryObj().toString()
- << "(sort: " << cq->getParsed().getSort()
+ if (!planCache->contains(*cq)) {
+ // Log if asked to clear non-existent query shape.
+ LOG(1) << ns << ": query shape doesn't exist in PlanCache - "
+ << cq->getQueryObj().toString() << "(sort: " << cq->getParsed().getSort()
<< "; projection: " << cq->getParsed().getProj() << ")";
-
return Status::OK();
}
- // If query is not provided, make sure sort and projection are not in arguments.
- // We do not want to clear the entire cache inadvertently when the user
- // forgets to provide a value for "query".
- if (cmdObj.hasField("sort") || cmdObj.hasField("projection")) {
- return Status(ErrorCodes::BadValue, "sort or projection provided without query");
+ Status result = planCache->remove(*cq);
+ if (!result.isOK()) {
+ return result;
}
- planCache->clear();
-
- LOG(1) << ns << ": cleared plan cache";
+ LOG(1) << ns << ": removed plan cache entry - " << cq->getQueryObj().toString()
+ << "(sort: " << cq->getParsed().getSort()
+ << "; projection: " << cq->getParsed().getProj() << ")";
return Status::OK();
}
- PlanCacheListPlans::PlanCacheListPlans() : PlanCacheCommand("planCacheListPlans",
- "Displays the cached plans for a query shape.",
- ActionType::planCacheRead) { }
+ // If query is not provided, make sure sort and projection are not in arguments.
+ // We do not want to clear the entire cache inadvertently when the user
+ // forgets to provide a value for "query".
+ if (cmdObj.hasField("sort") || cmdObj.hasField("projection")) {
+ return Status(ErrorCodes::BadValue, "sort or projection provided without query");
+ }
- Status PlanCacheListPlans::runPlanCacheCommand(OperationContext* txn,
- const std::string& ns,
- BSONObj& cmdObj,
- BSONObjBuilder* bob) {
- AutoGetCollectionForRead ctx(txn, ns);
+ planCache->clear();
- PlanCache* planCache;
- Status status = getPlanCache(txn, ctx.getCollection(), ns, &planCache);
- if (!status.isOK()) {
- // No collection - return empty plans array.
- BSONArrayBuilder plansBuilder(bob->subarrayStart("plans"));
- plansBuilder.doneFast();
- return Status::OK();
- }
- return list(txn, *planCache, ns, cmdObj, bob);
- }
+ LOG(1) << ns << ": cleared plan cache";
- // static
- Status PlanCacheListPlans::list(OperationContext* txn,
- const PlanCache& planCache,
- const std::string& ns,
- const BSONObj& cmdObj,
- BSONObjBuilder* bob) {
- CanonicalQuery* cqRaw;
- Status status = canonicalize(txn, ns, cmdObj, &cqRaw);
- if (!status.isOK()) {
- return status;
- }
+ return Status::OK();
+}
- unique_ptr<CanonicalQuery> cq(cqRaw);
+PlanCacheListPlans::PlanCacheListPlans()
+ : PlanCacheCommand("planCacheListPlans",
+ "Displays the cached plans for a query shape.",
+ ActionType::planCacheRead) {}
- if (!planCache.contains(*cq)) {
- // Return empty plans in results if query shape does not
- // exist in plan cache.
- BSONArrayBuilder plansBuilder(bob->subarrayStart("plans"));
- plansBuilder.doneFast();
- return Status::OK();
- }
+Status PlanCacheListPlans::runPlanCacheCommand(OperationContext* txn,
+ const std::string& ns,
+ BSONObj& cmdObj,
+ BSONObjBuilder* bob) {
+ AutoGetCollectionForRead ctx(txn, ns);
- PlanCacheEntry* entryRaw;
- Status result = planCache.getEntry(*cq, &entryRaw);
- if (!result.isOK()) {
- return result;
- }
- unique_ptr<PlanCacheEntry> entry(entryRaw);
+ PlanCache* planCache;
+ Status status = getPlanCache(txn, ctx.getCollection(), ns, &planCache);
+ if (!status.isOK()) {
+ // No collection - return empty plans array.
+ BSONArrayBuilder plansBuilder(bob->subarrayStart("plans"));
+ plansBuilder.doneFast();
+ return Status::OK();
+ }
+ return list(txn, *planCache, ns, cmdObj, bob);
+}
+
+// static
+Status PlanCacheListPlans::list(OperationContext* txn,
+ const PlanCache& planCache,
+ const std::string& ns,
+ const BSONObj& cmdObj,
+ BSONObjBuilder* bob) {
+ CanonicalQuery* cqRaw;
+ Status status = canonicalize(txn, ns, cmdObj, &cqRaw);
+ if (!status.isOK()) {
+ return status;
+ }
+
+ unique_ptr<CanonicalQuery> cq(cqRaw);
+ if (!planCache.contains(*cq)) {
+ // Return empty plans in results if query shape does not
+ // exist in plan cache.
BSONArrayBuilder plansBuilder(bob->subarrayStart("plans"));
- size_t numPlans = entry->plannerData.size();
- invariant(numPlans == entry->decision->stats.size());
- invariant(numPlans == entry->decision->scores.size());
- for (size_t i = 0; i < numPlans; ++i) {
- BSONObjBuilder planBob(plansBuilder.subobjStart());
-
- // Create plan details field.
- // Currently, simple string representationg of
- // SolutionCacheData. Need to revisit format when we
- // need to parse user-provided plan details for planCacheAddPlan.
- SolutionCacheData* scd = entry->plannerData[i];
- BSONObjBuilder detailsBob(planBob.subobjStart("details"));
- detailsBob.append("solution", scd->toString());
- detailsBob.doneFast();
-
- // reason is comprised of score and initial stats provided by
- // multi plan runner.
- BSONObjBuilder reasonBob(planBob.subobjStart("reason"));
- reasonBob.append("score", entry->decision->scores[i]);
- BSONObjBuilder statsBob(reasonBob.subobjStart("stats"));
- PlanStageStats* stats = entry->decision->stats.vector()[i];
- if (stats) {
- Explain::statsToBSON(*stats, &statsBob);
- }
- statsBob.doneFast();
- reasonBob.doneFast();
-
- // BSON object for 'feedback' field shows scores from historical executions of the plan.
- BSONObjBuilder feedbackBob(planBob.subobjStart("feedback"));
- if (i == 0U) {
- feedbackBob.append("nfeedback", int(entry->feedback.size()));
- BSONArrayBuilder scoresBob(feedbackBob.subarrayStart("scores"));
- for (size_t i = 0; i < entry->feedback.size(); ++i) {
- BSONObjBuilder scoreBob(scoresBob.subobjStart());
- scoreBob.append("score", entry->feedback[i]->score);
- }
- scoresBob.doneFast();
- }
- feedbackBob.doneFast();
+ plansBuilder.doneFast();
+ return Status::OK();
+ }
- planBob.append("filterSet", scd->indexFilterApplied);
+ PlanCacheEntry* entryRaw;
+ Status result = planCache.getEntry(*cq, &entryRaw);
+ if (!result.isOK()) {
+ return result;
+ }
+ unique_ptr<PlanCacheEntry> entry(entryRaw);
+
+ BSONArrayBuilder plansBuilder(bob->subarrayStart("plans"));
+ size_t numPlans = entry->plannerData.size();
+ invariant(numPlans == entry->decision->stats.size());
+ invariant(numPlans == entry->decision->scores.size());
+ for (size_t i = 0; i < numPlans; ++i) {
+ BSONObjBuilder planBob(plansBuilder.subobjStart());
+
+ // Create plan details field.
+        // Currently, a simple string representation of
+ // SolutionCacheData. Need to revisit format when we
+ // need to parse user-provided plan details for planCacheAddPlan.
+ SolutionCacheData* scd = entry->plannerData[i];
+ BSONObjBuilder detailsBob(planBob.subobjStart("details"));
+ detailsBob.append("solution", scd->toString());
+ detailsBob.doneFast();
+
+        // reason consists of the score and initial stats provided by
+        // the multi plan runner.
+ BSONObjBuilder reasonBob(planBob.subobjStart("reason"));
+ reasonBob.append("score", entry->decision->scores[i]);
+ BSONObjBuilder statsBob(reasonBob.subobjStart("stats"));
+ PlanStageStats* stats = entry->decision->stats.vector()[i];
+ if (stats) {
+ Explain::statsToBSON(*stats, &statsBob);
}
- plansBuilder.doneFast();
+ statsBob.doneFast();
+ reasonBob.doneFast();
+
+ // BSON object for 'feedback' field shows scores from historical executions of the plan.
+ BSONObjBuilder feedbackBob(planBob.subobjStart("feedback"));
+ if (i == 0U) {
+ feedbackBob.append("nfeedback", int(entry->feedback.size()));
+ BSONArrayBuilder scoresBob(feedbackBob.subarrayStart("scores"));
+ for (size_t i = 0; i < entry->feedback.size(); ++i) {
+ BSONObjBuilder scoreBob(scoresBob.subobjStart());
+ scoreBob.append("score", entry->feedback[i]->score);
+ }
+ scoresBob.doneFast();
+ }
+ feedbackBob.doneFast();
- return Status::OK();
+ planBob.append("filterSet", scd->indexFilterApplied);
}
+ plansBuilder.doneFast();
+
+ return Status::OK();
+}
-} // namespace mongo
+} // namespace mongo
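
For orientation, a minimal sketch of the validation rules canonicalize() enforces above; it reuses the test scaffolding (OperationContextNoop, fromjson, the ASSERT_* macros) that appears in plan_cache_commands_test.cpp later in this diff:

    OperationContextNoop txn;
    CanonicalQuery* cqRaw;
    // "query" is required and must be an object.
    ASSERT_NOT_OK(PlanCacheCommand::canonicalize(&txn, "test.t", fromjson("{}"), &cqRaw));
    // "sort" and "projection" are optional, but must be objects when present.
    ASSERT_OK(PlanCacheCommand::canonicalize(
        &txn, "test.t", fromjson("{query: {a: 1}, sort: {a: 1}}"), &cqRaw));
    std::unique_ptr<CanonicalQuery> cq(cqRaw);  // on success the caller owns the raw pointer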
diff --git a/src/mongo/db/commands/plan_cache_commands.h b/src/mongo/db/commands/plan_cache_commands.h
index 05b7c5969a8..3858704dbde 100644
--- a/src/mongo/db/commands/plan_cache_commands.h
+++ b/src/mongo/db/commands/plan_cache_commands.h
@@ -33,155 +33,154 @@
namespace mongo {
+/**
+ * DB commands for plan cache.
+ * These are in a header to facilitate unit testing. See plan_cache_commands_test.cpp.
+ */
+
+/**
+ * PlanCacheCommand
+ * Defines common attributes for all plan cache related commands
+ * such as slaveOk.
+ */
+class PlanCacheCommand : public Command {
+public:
+ PlanCacheCommand(const std::string& name, const std::string& helpText, ActionType actionType);
+
/**
- * DB commands for plan cache.
- * These are in a header to facilitate unit testing. See plan_cache_commands_test.cpp.
+ * Entry point from command subsystem.
+ * The implementation standardizes error handling, such as adding
+ * the error code and message to the BSON result.
+ *
+ * Do not override in derived classes.
+ * Override runPlanCacheCommands instead to
+ * implement plan cache command functionality.
*/
+ bool run(OperationContext* txn,
+ const std::string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result);
+
+ virtual bool isWriteCommandForConfigServer() const;
+
+ virtual bool slaveOk() const;
+
+ virtual bool slaveOverrideOk() const;
+
+ virtual void help(std::stringstream& ss) const;
+
/**
- * PlanCacheCommand
- * Defines common attributes for all plan cache related commands
- * such as slaveOk.
+ * Two action types defined for plan cache commands:
+ * - planCacheRead
+ * - planCacheWrite
*/
- class PlanCacheCommand : public Command {
- public:
- PlanCacheCommand(const std::string& name, const std::string& helpText,
- ActionType actionType);
-
- /**
- * Entry point from command subsystem.
- * Implementation provides standardization of error handling
- * such as adding error code and message to BSON result.
- *
- * Do not override in derived classes.
- * Override runPlanCacheCommands instead to
- * implement plan cache command functionality.
- */
-
- bool run(OperationContext* txn,
- const std::string& dbname,
- BSONObj& cmdObj,
- int options,
- std::string& errmsg,
- BSONObjBuilder& result);
-
- virtual bool isWriteCommandForConfigServer() const;
-
- virtual bool slaveOk() const;
-
- virtual bool slaveOverrideOk() const;
-
- virtual void help(std::stringstream& ss) const;
-
- /**
- * Two action types defined for plan cache commands:
- * - planCacheRead
- * - planCacheWrite
- */
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj);
- /**
- * Subset of command arguments used by plan cache commands
- * Override to provide command functionality.
- * Should contain just enough logic to invoke run*Command() function
- * in plan_cache.h
- */
- virtual Status runPlanCacheCommand(OperationContext* txn,
- const std::string& ns,
- BSONObj& cmdObj,
- BSONObjBuilder* bob) = 0;
-
- /**
- * Validatess query shape from command object and returns canonical query.
- */
- static Status canonicalize(OperationContext* txn,
- const std::string& ns,
- const BSONObj& cmdObj,
- CanonicalQuery** canonicalQueryOut);
-
- private:
- std::string helpText;
- ActionType actionType;
- };
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj);
+ /**
+     * Receives the subset of command arguments used by plan cache commands.
+     * Override to provide command functionality; implementations should
+     * contain just enough logic to invoke the corresponding run*Command()
+     * function in plan_cache.h.
+ */
+ virtual Status runPlanCacheCommand(OperationContext* txn,
+ const std::string& ns,
+ BSONObj& cmdObj,
+ BSONObjBuilder* bob) = 0;
/**
- * planCacheListQueryShapes
- *
- * { planCacheListQueryShapes: <collection> }
- *
+     * Validates the query shape from the command object and returns a canonical query.
*/
- class PlanCacheListQueryShapes : public PlanCacheCommand {
- public:
- PlanCacheListQueryShapes();
- virtual Status runPlanCacheCommand(OperationContext* txn,
- const std::string& ns,
- BSONObj& cmdObj,
- BSONObjBuilder* bob);
-
- /**
- * Looks up cache keys for collection's plan cache.
- * Inserts keys for query into BSON builder.
- */
- static Status list(const PlanCache& planCache, BSONObjBuilder* bob);
- };
+ static Status canonicalize(OperationContext* txn,
+ const std::string& ns,
+ const BSONObj& cmdObj,
+ CanonicalQuery** canonicalQueryOut);
+
+private:
+ std::string helpText;
+ ActionType actionType;
+};
+
+/**
+ * planCacheListQueryShapes
+ *
+ * { planCacheListQueryShapes: <collection> }
+ *
+ */
+class PlanCacheListQueryShapes : public PlanCacheCommand {
+public:
+ PlanCacheListQueryShapes();
+ virtual Status runPlanCacheCommand(OperationContext* txn,
+ const std::string& ns,
+ BSONObj& cmdObj,
+ BSONObjBuilder* bob);
/**
- * planCacheClear
- *
- * {
- * planCacheClear: <collection>,
- * query: <query>,
- * sort: <sort>,
- * projection: <projection>
- * }
- *
+     * Looks up the cache keys in the collection's plan cache and
+     * inserts the keys for each query into the BSON builder.
*/
- class PlanCacheClear : public PlanCacheCommand {
- public:
- PlanCacheClear();
- virtual Status runPlanCacheCommand(OperationContext* txn,
- const std::string& ns,
- BSONObj& cmdObj,
- BSONObjBuilder* bob);
-
- /**
- * Clears collection's plan cache.
- * If query shape is provided, clears plans for that single query shape only.
- */
- static Status clear(OperationContext* txn,
- PlanCache* planCache,
- const std::string& ns,
- const BSONObj& cmdObj);
- };
+ static Status list(const PlanCache& planCache, BSONObjBuilder* bob);
+};
+
+/**
+ * planCacheClear
+ *
+ * {
+ * planCacheClear: <collection>,
+ * query: <query>,
+ * sort: <sort>,
+ * projection: <projection>
+ * }
+ *
+ */
+class PlanCacheClear : public PlanCacheCommand {
+public:
+ PlanCacheClear();
+ virtual Status runPlanCacheCommand(OperationContext* txn,
+ const std::string& ns,
+ BSONObj& cmdObj,
+ BSONObjBuilder* bob);
/**
- * planCacheListPlans
- *
- * {
- * planCacheListPlans: <collection>,
- * query: <query>,
- * sort: <sort>,
- * projection: <projection>
- * }
- *
+     * Clears the collection's plan cache.
+     * If a query shape is provided, clears only the plans for that shape.
+ */
+ static Status clear(OperationContext* txn,
+ PlanCache* planCache,
+ const std::string& ns,
+ const BSONObj& cmdObj);
+};
+
+/**
+ * planCacheListPlans
+ *
+ * {
+ * planCacheListPlans: <collection>,
+ * query: <query>,
+ * sort: <sort>,
+ * projection: <projection>
+ * }
+ *
+ */
+class PlanCacheListPlans : public PlanCacheCommand {
+public:
+ PlanCacheListPlans();
+ virtual Status runPlanCacheCommand(OperationContext* txn,
+ const std::string& ns,
+ BSONObj& cmdObj,
+ BSONObjBuilder* bob);
+
+ /**
+ * Displays the cached plans for a query shape.
*/
- class PlanCacheListPlans : public PlanCacheCommand {
- public:
- PlanCacheListPlans();
- virtual Status runPlanCacheCommand(OperationContext* txn,
- const std::string& ns,
- BSONObj& cmdObj,
- BSONObjBuilder* bob);
-
- /**
- * Displays the cached plans for a query shape.
- */
- static Status list(OperationContext* txn,
- const PlanCache& planCache,
- const std::string& ns,
- const BSONObj& cmdObj,
- BSONObjBuilder* bob);
- };
+ static Status list(OperationContext* txn,
+ const PlanCache& planCache,
+ const std::string& ns,
+ const BSONObj& cmdObj,
+ BSONObjBuilder* bob);
+};
} // namespace mongo
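
The header is designed for subclassing: run(), checkAuthForCommand(), and the slave flags come from the base class, so a new command only supplies a constructor and runPlanCacheCommand(). As a hedged illustration (the command below is hypothetical and not part of this commit; getPlanCache() is the helper the commands in plan_cache_commands.cpp above already use), a read-only variant might look like:

    // Hypothetical sketch: reports the number of cached query shapes in a collection.
    class PlanCacheCountShapes : public PlanCacheCommand {
    public:
        PlanCacheCountShapes()
            : PlanCacheCommand("planCacheCountShapes",
                               "Hypothetical example: counts query shapes in a collection.",
                               ActionType::planCacheRead) {}
        virtual Status runPlanCacheCommand(OperationContext* txn,
                                           const std::string& ns,
                                           BSONObj& cmdObj,
                                           BSONObjBuilder* bob) {
            AutoGetCollectionForRead ctx(txn, ns);
            PlanCache* planCache;
            Status status = getPlanCache(txn, ctx.getCollection(), ns, &planCache);
            if (!status.isOK()) {
                bob->append("numShapes", 0);  // no collection - nothing cached
                return Status::OK();
            }
            std::vector<PlanCacheEntry*> entries = planCache->getAllEntries();
            bob->append("numShapes", static_cast<int>(entries.size()));
            for (size_t i = 0; i < entries.size(); ++i) {
                delete entries[i];  // getAllEntries() transfers ownership, as in list() above
            }
            return Status::OK();
        }
    };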
diff --git a/src/mongo/db/commands/plan_cache_commands_test.cpp b/src/mongo/db/commands/plan_cache_commands_test.cpp
index 86eecdbda7e..8a7eee783d8 100644
--- a/src/mongo/db/commands/plan_cache_commands_test.cpp
+++ b/src/mongo/db/commands/plan_cache_commands_test.cpp
@@ -45,371 +45,374 @@ using namespace mongo;
namespace {
- using std::string;
- using std::unique_ptr;
- using std::vector;
-
- static const char* ns = "test.t";
-
- /**
- * Tests for planCacheListQueryShapes
- */
-
- /**
- * Utility function to get list of keys in the cache.
- */
- std::vector<BSONObj> getShapes(const PlanCache& planCache) {
- BSONObjBuilder bob;
- ASSERT_OK(PlanCacheListQueryShapes::list(planCache, &bob));
- BSONObj resultObj = bob.obj();
- BSONElement shapesElt = resultObj.getField("shapes");
- ASSERT_EQUALS(shapesElt.type(), mongo::Array);
- vector<BSONElement> shapesEltArray = shapesElt.Array();
- vector<BSONObj> shapes;
- for (vector<BSONElement>::const_iterator i = shapesEltArray.begin();
- i != shapesEltArray.end(); ++i) {
- const BSONElement& elt = *i;
-
- ASSERT_TRUE(elt.isABSONObj());
- BSONObj obj = elt.Obj();
-
- // Check required fields.
- // query
- BSONElement queryElt = obj.getField("query");
- ASSERT_TRUE(queryElt.isABSONObj());
-
- // sort
- BSONElement sortElt = obj.getField("sort");
- ASSERT_TRUE(sortElt.isABSONObj());
-
- // projection
- BSONElement projectionElt = obj.getField("projection");
- ASSERT_TRUE(projectionElt.isABSONObj());
-
- // All fields OK. Append to vector.
- shapes.push_back(obj.getOwned());
- }
- return shapes;
- }
+using std::string;
+using std::unique_ptr;
+using std::vector;
- /**
- * Utility function to create a SolutionCacheData
- */
- SolutionCacheData* createSolutionCacheData() {
- unique_ptr<SolutionCacheData> scd(new SolutionCacheData());
- scd->tree.reset(new PlanCacheIndexTree());
- return scd.release();
- }
+static const char* ns = "test.t";
- /**
- * Utility function to create a PlanRankingDecision
- */
- PlanRankingDecision* createDecision(size_t numPlans) {
- unique_ptr<PlanRankingDecision> why(new PlanRankingDecision());
- for (size_t i = 0; i < numPlans; ++i) {
- CommonStats common("COLLSCAN");
- unique_ptr<PlanStageStats> stats(new PlanStageStats(common, STAGE_COLLSCAN));
- stats->specific.reset(new CollectionScanStats());
- why->stats.mutableVector().push_back(stats.release());
- why->scores.push_back(0U);
- why->candidateOrder.push_back(i);
- }
- return why.release();
- }
+/**
+ * Tests for planCacheListQueryShapes
+ */
- TEST(PlanCacheCommandsTest, planCacheListQueryShapesEmpty) {
- PlanCache empty;
- vector<BSONObj> shapes = getShapes(empty);
- ASSERT_TRUE(shapes.empty());
- }
+/**
+ * Utility function to get list of keys in the cache.
+ */
+std::vector<BSONObj> getShapes(const PlanCache& planCache) {
+ BSONObjBuilder bob;
+ ASSERT_OK(PlanCacheListQueryShapes::list(planCache, &bob));
+ BSONObj resultObj = bob.obj();
+ BSONElement shapesElt = resultObj.getField("shapes");
+ ASSERT_EQUALS(shapesElt.type(), mongo::Array);
+ vector<BSONElement> shapesEltArray = shapesElt.Array();
+ vector<BSONObj> shapes;
+ for (vector<BSONElement>::const_iterator i = shapesEltArray.begin(); i != shapesEltArray.end();
+ ++i) {
+ const BSONElement& elt = *i;
- TEST(PlanCacheCommandsTest, planCacheListQueryShapesOneKey) {
- // Create a canonical query
- CanonicalQuery* cqRaw;
- ASSERT_OK(CanonicalQuery::canonicalize(ns, fromjson("{a: 1}"), &cqRaw));
- unique_ptr<CanonicalQuery> cq(cqRaw);
-
- // Plan cache with one entry
- PlanCache planCache;
- QuerySolution qs;
- qs.cacheData.reset(createSolutionCacheData());
- std::vector<QuerySolution*> solns;
- solns.push_back(&qs);
- planCache.add(*cq, solns, createDecision(1U));
-
- vector<BSONObj> shapes = getShapes(planCache);
- ASSERT_EQUALS(shapes.size(), 1U);
- ASSERT_EQUALS(shapes[0].getObjectField("query"), cq->getQueryObj());
- ASSERT_EQUALS(shapes[0].getObjectField("sort"), cq->getParsed().getSort());
- ASSERT_EQUALS(shapes[0].getObjectField("projection"), cq->getParsed().getProj());
- }
+ ASSERT_TRUE(elt.isABSONObj());
+ BSONObj obj = elt.Obj();
- /**
- * Tests for planCacheClear
- */
-
- TEST(PlanCacheCommandsTest, planCacheClearAllShapes) {
- // Create a canonical query
- CanonicalQuery* cqRaw;
- ASSERT_OK(CanonicalQuery::canonicalize(ns, fromjson("{a: 1}"), &cqRaw));
- unique_ptr<CanonicalQuery> cq(cqRaw);
-
- // Plan cache with one entry
- PlanCache planCache;
- QuerySolution qs;
- OperationContextNoop txn;
-
- qs.cacheData.reset(createSolutionCacheData());
- std::vector<QuerySolution*> solns;
- solns.push_back(&qs);
- planCache.add(*cq, solns, createDecision(1U));
- ASSERT_EQUALS(getShapes(planCache).size(), 1U);
-
- // Clear cache and confirm number of keys afterwards.
- ASSERT_OK(PlanCacheClear::clear(&txn, &planCache, ns, BSONObj()));
- ASSERT_EQUALS(getShapes(planCache).size(), 0U);
- }
+ // Check required fields.
+ // query
+ BSONElement queryElt = obj.getField("query");
+ ASSERT_TRUE(queryElt.isABSONObj());
- /**
- * Tests for PlanCacheCommand::makeCacheKey
- * Mostly validation on the input parameters
- */
-
- TEST(PlanCacheCommandsTest, Canonicalize) {
- // Invalid parameters
- PlanCache planCache;
- CanonicalQuery* cqRaw;
- OperationContextNoop txn;
-
- // Missing query field
- ASSERT_NOT_OK(PlanCacheCommand::canonicalize(&txn, ns, fromjson("{}"), &cqRaw));
- // Query needs to be an object
- ASSERT_NOT_OK(PlanCacheCommand::canonicalize(&txn, ns, fromjson("{query: 1}"), &cqRaw));
- // Sort needs to be an object
- ASSERT_NOT_OK(PlanCacheCommand::canonicalize(&txn, ns, fromjson("{query: {}, sort: 1}"),
- &cqRaw));
- // Bad query (invalid sort order)
- ASSERT_NOT_OK(PlanCacheCommand::canonicalize(&txn, ns, fromjson("{query: {}, sort: {a: 0}}"),
- &cqRaw));
-
- // Valid parameters
- ASSERT_OK(PlanCacheCommand::canonicalize(&txn, ns, fromjson("{query: {a: 1, b: 1}}"), &cqRaw));
- unique_ptr<CanonicalQuery> query(cqRaw);
-
-
- // Equivalent query should generate same key.
- ASSERT_OK(PlanCacheCommand::canonicalize(&txn, ns, fromjson("{query: {b: 1, a: 1}}"), &cqRaw));
- unique_ptr<CanonicalQuery> equivQuery(cqRaw);
- ASSERT_EQUALS(planCache.computeKey(*query), planCache.computeKey(*equivQuery));
-
- // Sort query should generate different key from unsorted query.
- ASSERT_OK(PlanCacheCommand::canonicalize(&txn, ns,
- fromjson("{query: {a: 1, b: 1}, sort: {a: 1, b: 1}}"), &cqRaw));
- unique_ptr<CanonicalQuery> sortQuery1(cqRaw);
- ASSERT_NOT_EQUALS(planCache.computeKey(*query), planCache.computeKey(*sortQuery1));
-
- // Confirm sort arguments are properly delimited (SERVER-17158)
- ASSERT_OK(PlanCacheCommand::canonicalize(&txn, ns,
- fromjson("{query: {a: 1, b: 1}, sort: {aab: 1}}"), &cqRaw));
- unique_ptr<CanonicalQuery> sortQuery2(cqRaw);
- ASSERT_NOT_EQUALS(planCache.computeKey(*sortQuery1), planCache.computeKey(*sortQuery2));
-
- // Changing order and/or value of predicates should not change key
- ASSERT_OK(PlanCacheCommand::canonicalize(&txn, ns,
- fromjson("{query: {b: 3, a: 3}, sort: {a: 1, b: 1}}"), &cqRaw));
- unique_ptr<CanonicalQuery> sortQuery3(cqRaw);
- ASSERT_EQUALS(planCache.computeKey(*sortQuery1), planCache.computeKey(*sortQuery3));
-
- // Projected query should generate different key from unprojected query.
- ASSERT_OK(PlanCacheCommand::canonicalize(&txn, ns,
- fromjson("{query: {a: 1, b: 1}, projection: {_id: 0, a: 1}}"), &cqRaw));
- unique_ptr<CanonicalQuery> projectionQuery(cqRaw);
- ASSERT_NOT_EQUALS(planCache.computeKey(*query), planCache.computeKey(*projectionQuery));
- }
+ // sort
+ BSONElement sortElt = obj.getField("sort");
+ ASSERT_TRUE(sortElt.isABSONObj());
+
+ // projection
+ BSONElement projectionElt = obj.getField("projection");
+ ASSERT_TRUE(projectionElt.isABSONObj());
- /**
- * Tests for planCacheClear (single query shape)
- */
-
- TEST(PlanCacheCommandsTest, planCacheClearInvalidParameter) {
- PlanCache planCache;
- OperationContextNoop txn;
-
- // Query field type must be BSON object.
- ASSERT_NOT_OK(PlanCacheClear::clear(&txn, &planCache, ns, fromjson("{query: 12345}")));
- ASSERT_NOT_OK(PlanCacheClear::clear(&txn, &planCache, ns, fromjson("{query: /keyisnotregex/}")));
- // Query must pass canonicalization.
- ASSERT_NOT_OK(PlanCacheClear::clear(&txn, &planCache, ns,
- fromjson("{query: {a: {$no_such_op: 1}}}")));
- // Sort present without query is an error.
- ASSERT_NOT_OK(PlanCacheClear::clear(&txn, &planCache, ns, fromjson("{sort: {a: 1}}")));
- // Projection present without query is an error.
- ASSERT_NOT_OK(PlanCacheClear::clear(&txn, &planCache, ns,
- fromjson("{projection: {_id: 0, a: 1}}")));
+ // All fields OK. Append to vector.
+ shapes.push_back(obj.getOwned());
}
+ return shapes;
+}
- TEST(PlanCacheCommandsTest, planCacheClearUnknownKey) {
- PlanCache planCache;
- OperationContextNoop txn;
+/**
+ * Utility function to create a SolutionCacheData
+ */
+SolutionCacheData* createSolutionCacheData() {
+ unique_ptr<SolutionCacheData> scd(new SolutionCacheData());
+ scd->tree.reset(new PlanCacheIndexTree());
+ return scd.release();
+}
- ASSERT_OK(PlanCacheClear::clear(&txn, &planCache, ns, fromjson("{query: {a: 1}}")));
+/**
+ * Utility function to create a PlanRankingDecision
+ */
+PlanRankingDecision* createDecision(size_t numPlans) {
+ unique_ptr<PlanRankingDecision> why(new PlanRankingDecision());
+ for (size_t i = 0; i < numPlans; ++i) {
+ CommonStats common("COLLSCAN");
+ unique_ptr<PlanStageStats> stats(new PlanStageStats(common, STAGE_COLLSCAN));
+ stats->specific.reset(new CollectionScanStats());
+ why->stats.mutableVector().push_back(stats.release());
+ why->scores.push_back(0U);
+ why->candidateOrder.push_back(i);
}
+ return why.release();
+}
+
+TEST(PlanCacheCommandsTest, planCacheListQueryShapesEmpty) {
+ PlanCache empty;
+ vector<BSONObj> shapes = getShapes(empty);
+ ASSERT_TRUE(shapes.empty());
+}
+
+TEST(PlanCacheCommandsTest, planCacheListQueryShapesOneKey) {
+ // Create a canonical query
+ CanonicalQuery* cqRaw;
+ ASSERT_OK(CanonicalQuery::canonicalize(ns, fromjson("{a: 1}"), &cqRaw));
+ unique_ptr<CanonicalQuery> cq(cqRaw);
+
+ // Plan cache with one entry
+ PlanCache planCache;
+ QuerySolution qs;
+ qs.cacheData.reset(createSolutionCacheData());
+ std::vector<QuerySolution*> solns;
+ solns.push_back(&qs);
+ planCache.add(*cq, solns, createDecision(1U));
+
+ vector<BSONObj> shapes = getShapes(planCache);
+ ASSERT_EQUALS(shapes.size(), 1U);
+ ASSERT_EQUALS(shapes[0].getObjectField("query"), cq->getQueryObj());
+ ASSERT_EQUALS(shapes[0].getObjectField("sort"), cq->getParsed().getSort());
+ ASSERT_EQUALS(shapes[0].getObjectField("projection"), cq->getParsed().getProj());
+}
- TEST(PlanCacheCommandsTest, planCacheClearOneKey) {
- // Create 2 canonical queries.
- CanonicalQuery* cqRaw;
- ASSERT_OK(CanonicalQuery::canonicalize(ns, fromjson("{a: 1}"), &cqRaw));
- unique_ptr<CanonicalQuery> cqA(cqRaw);
- ASSERT_OK(CanonicalQuery::canonicalize(ns, fromjson("{b: 1}"), &cqRaw));
- unique_ptr<CanonicalQuery> cqB(cqRaw);
-
- // Create plan cache with 2 entries.
- PlanCache planCache;
- QuerySolution qs;
- qs.cacheData.reset(createSolutionCacheData());
- std::vector<QuerySolution*> solns;
- solns.push_back(&qs);
- planCache.add(*cqA, solns, createDecision(1U));
- planCache.add(*cqB, solns, createDecision(1U));
-
- // Check keys in cache before dropping {b: 1}
- vector<BSONObj> shapesBefore = getShapes(planCache);
- ASSERT_EQUALS(shapesBefore.size(), 2U);
- BSONObj shapeA = BSON("query" << cqA->getQueryObj() << "sort" << cqA->getParsed().getSort()
- << "projection" << cqA->getParsed().getProj());
- BSONObj shapeB = BSON("query" << cqB->getQueryObj() << "sort" << cqB->getParsed().getSort()
- << "projection" << cqB->getParsed().getProj());
- ASSERT_TRUE(std::find(shapesBefore.begin(), shapesBefore.end(), shapeA) != shapesBefore.end());
- ASSERT_TRUE(std::find(shapesBefore.begin(), shapesBefore.end(), shapeB) != shapesBefore.end());
-
- // Drop {b: 1} from cache. Make sure {a: 1} is still in cache afterwards.
- BSONObjBuilder bob;
- OperationContextNoop txn;
-
- ASSERT_OK(PlanCacheClear::clear(&txn, &planCache, ns, BSON("query" << cqB->getQueryObj())));
- vector<BSONObj> shapesAfter = getShapes(planCache);
- ASSERT_EQUALS(shapesAfter.size(), 1U);
- ASSERT_EQUALS(shapesAfter[0], shapeA);
- }
+/**
+ * Tests for planCacheClear
+ */
- /**
- * Tests for planCacheListPlans
- */
-
- /**
- * Function to extract plan ID from BSON element.
- * Validates planID during extraction.
- * Each BSON element contains an embedded BSON object with the following layout:
- * {
- * plan: <plan_id>,
- * details: <plan_details>,
- * reason: <ranking_stats>,
- * feedback: <execution_stats>,
- * source: <source>
- * }
- * Compilation note: GCC 4.4 has issues with getPlan() declared as a function object.
- */
- BSONObj getPlan(const BSONElement& elt) {
- ASSERT_TRUE(elt.isABSONObj());
- BSONObj obj = elt.Obj();
+TEST(PlanCacheCommandsTest, planCacheClearAllShapes) {
+ // Create a canonical query
+ CanonicalQuery* cqRaw;
+ ASSERT_OK(CanonicalQuery::canonicalize(ns, fromjson("{a: 1}"), &cqRaw));
+ unique_ptr<CanonicalQuery> cq(cqRaw);
- // Check required fields.
- // details
- BSONElement detailsElt = obj.getField("details");
- ASSERT_TRUE(detailsElt.isABSONObj());
+ // Plan cache with one entry
+ PlanCache planCache;
+ QuerySolution qs;
+ OperationContextNoop txn;
- // reason
- BSONElement reasonElt = obj.getField("reason");
- ASSERT_TRUE(reasonElt.isABSONObj());
+ qs.cacheData.reset(createSolutionCacheData());
+ std::vector<QuerySolution*> solns;
+ solns.push_back(&qs);
+ planCache.add(*cq, solns, createDecision(1U));
+ ASSERT_EQUALS(getShapes(planCache).size(), 1U);
- // feedback
- BSONElement feedbackElt = obj.getField("feedback");
- ASSERT_TRUE(feedbackElt.isABSONObj());
+ // Clear cache and confirm number of keys afterwards.
+ ASSERT_OK(PlanCacheClear::clear(&txn, &planCache, ns, BSONObj()));
+ ASSERT_EQUALS(getShapes(planCache).size(), 0U);
+}
- return obj.getOwned();
- }
+/**
+ * Tests for PlanCacheCommand::makeCacheKey
+ * Mostly validation of the input parameters.
+ */
- /**
- * Utility function to get list of plan IDs for a query in the cache.
- */
- vector<BSONObj> getPlans(const PlanCache& planCache, const BSONObj& query,
- const BSONObj& sort, const BSONObj& projection) {
- OperationContextNoop txn;
-
- BSONObjBuilder bob;
- BSONObj cmdObj = BSON("query" << query << "sort" << sort << "projection" << projection);
- ASSERT_OK(PlanCacheListPlans::list(&txn, planCache, ns, cmdObj, &bob));
- BSONObj resultObj = bob.obj();
- BSONElement plansElt = resultObj.getField("plans");
- ASSERT_EQUALS(plansElt.type(), mongo::Array);
- vector<BSONElement> planEltArray = plansElt.Array();
- ASSERT_FALSE(planEltArray.empty());
- vector<BSONObj> plans(planEltArray.size());
- std::transform(planEltArray.begin(), planEltArray.end(), plans.begin(), getPlan);
- return plans;
- }
+TEST(PlanCacheCommandsTest, Canonicalize) {
+ // Invalid parameters
+ PlanCache planCache;
+ CanonicalQuery* cqRaw;
+ OperationContextNoop txn;
+
+ // Missing query field
+ ASSERT_NOT_OK(PlanCacheCommand::canonicalize(&txn, ns, fromjson("{}"), &cqRaw));
+ // Query needs to be an object
+ ASSERT_NOT_OK(PlanCacheCommand::canonicalize(&txn, ns, fromjson("{query: 1}"), &cqRaw));
+ // Sort needs to be an object
+ ASSERT_NOT_OK(
+ PlanCacheCommand::canonicalize(&txn, ns, fromjson("{query: {}, sort: 1}"), &cqRaw));
+ // Bad query (invalid sort order)
+ ASSERT_NOT_OK(
+ PlanCacheCommand::canonicalize(&txn, ns, fromjson("{query: {}, sort: {a: 0}}"), &cqRaw));
+
+ // Valid parameters
+ ASSERT_OK(PlanCacheCommand::canonicalize(&txn, ns, fromjson("{query: {a: 1, b: 1}}"), &cqRaw));
+ unique_ptr<CanonicalQuery> query(cqRaw);
+
+
+ // Equivalent query should generate same key.
+ ASSERT_OK(PlanCacheCommand::canonicalize(&txn, ns, fromjson("{query: {b: 1, a: 1}}"), &cqRaw));
+ unique_ptr<CanonicalQuery> equivQuery(cqRaw);
+ ASSERT_EQUALS(planCache.computeKey(*query), planCache.computeKey(*equivQuery));
+
+ // Sort query should generate different key from unsorted query.
+ ASSERT_OK(PlanCacheCommand::canonicalize(
+ &txn, ns, fromjson("{query: {a: 1, b: 1}, sort: {a: 1, b: 1}}"), &cqRaw));
+ unique_ptr<CanonicalQuery> sortQuery1(cqRaw);
+ ASSERT_NOT_EQUALS(planCache.computeKey(*query), planCache.computeKey(*sortQuery1));
+
+ // Confirm sort arguments are properly delimited (SERVER-17158)
+ ASSERT_OK(PlanCacheCommand::canonicalize(
+ &txn, ns, fromjson("{query: {a: 1, b: 1}, sort: {aab: 1}}"), &cqRaw));
+ unique_ptr<CanonicalQuery> sortQuery2(cqRaw);
+ ASSERT_NOT_EQUALS(planCache.computeKey(*sortQuery1), planCache.computeKey(*sortQuery2));
+
+ // Changing order and/or value of predicates should not change key
+ ASSERT_OK(PlanCacheCommand::canonicalize(
+ &txn, ns, fromjson("{query: {b: 3, a: 3}, sort: {a: 1, b: 1}}"), &cqRaw));
+ unique_ptr<CanonicalQuery> sortQuery3(cqRaw);
+ ASSERT_EQUALS(planCache.computeKey(*sortQuery1), planCache.computeKey(*sortQuery3));
+
+ // Projected query should generate different key from unprojected query.
+ ASSERT_OK(PlanCacheCommand::canonicalize(
+ &txn, ns, fromjson("{query: {a: 1, b: 1}, projection: {_id: 0, a: 1}}"), &cqRaw));
+ unique_ptr<CanonicalQuery> projectionQuery(cqRaw);
+ ASSERT_NOT_EQUALS(planCache.computeKey(*query), planCache.computeKey(*projectionQuery));
+}
- TEST(PlanCacheCommandsTest, planCacheListPlansInvalidParameter) {
- PlanCache planCache;
- BSONObjBuilder ignored;
- OperationContextNoop txn;
-
- // Missing query field is not ok.
- ASSERT_NOT_OK(PlanCacheListPlans::list(&txn, planCache, ns, BSONObj(), &ignored));
- // Query field type must be BSON object.
- ASSERT_NOT_OK(PlanCacheListPlans::list(&txn, planCache, ns, fromjson("{query: 12345}"),
- &ignored));
- ASSERT_NOT_OK(PlanCacheListPlans::list(&txn, planCache, ns, fromjson("{query: /keyisnotregex/}"),
- &ignored));
- }
+/**
+ * Tests for planCacheClear (single query shape)
+ */
- TEST(PlanCacheCommandsTest, planCacheListPlansUnknownKey) {
- // Leave the plan cache empty.
- PlanCache planCache;
- OperationContextNoop txn;
+TEST(PlanCacheCommandsTest, planCacheClearInvalidParameter) {
+ PlanCache planCache;
+ OperationContextNoop txn;
+
+ // Query field type must be BSON object.
+ ASSERT_NOT_OK(PlanCacheClear::clear(&txn, &planCache, ns, fromjson("{query: 12345}")));
+ ASSERT_NOT_OK(
+ PlanCacheClear::clear(&txn, &planCache, ns, fromjson("{query: /keyisnotregex/}")));
+ // Query must pass canonicalization.
+ ASSERT_NOT_OK(
+ PlanCacheClear::clear(&txn, &planCache, ns, fromjson("{query: {a: {$no_such_op: 1}}}")));
+ // Sort present without query is an error.
+ ASSERT_NOT_OK(PlanCacheClear::clear(&txn, &planCache, ns, fromjson("{sort: {a: 1}}")));
+ // Projection present without query is an error.
+ ASSERT_NOT_OK(
+ PlanCacheClear::clear(&txn, &planCache, ns, fromjson("{projection: {_id: 0, a: 1}}")));
+}
+
+TEST(PlanCacheCommandsTest, planCacheClearUnknownKey) {
+ PlanCache planCache;
+ OperationContextNoop txn;
+
+ ASSERT_OK(PlanCacheClear::clear(&txn, &planCache, ns, fromjson("{query: {a: 1}}")));
+}
+
+TEST(PlanCacheCommandsTest, planCacheClearOneKey) {
+ // Create 2 canonical queries.
+ CanonicalQuery* cqRaw;
+ ASSERT_OK(CanonicalQuery::canonicalize(ns, fromjson("{a: 1}"), &cqRaw));
+ unique_ptr<CanonicalQuery> cqA(cqRaw);
+ ASSERT_OK(CanonicalQuery::canonicalize(ns, fromjson("{b: 1}"), &cqRaw));
+ unique_ptr<CanonicalQuery> cqB(cqRaw);
+
+ // Create plan cache with 2 entries.
+ PlanCache planCache;
+ QuerySolution qs;
+ qs.cacheData.reset(createSolutionCacheData());
+ std::vector<QuerySolution*> solns;
+ solns.push_back(&qs);
+ planCache.add(*cqA, solns, createDecision(1U));
+ planCache.add(*cqB, solns, createDecision(1U));
+
+ // Check keys in cache before dropping {b: 1}
+ vector<BSONObj> shapesBefore = getShapes(planCache);
+ ASSERT_EQUALS(shapesBefore.size(), 2U);
+ BSONObj shapeA = BSON("query" << cqA->getQueryObj() << "sort" << cqA->getParsed().getSort()
+ << "projection" << cqA->getParsed().getProj());
+ BSONObj shapeB = BSON("query" << cqB->getQueryObj() << "sort" << cqB->getParsed().getSort()
+ << "projection" << cqB->getParsed().getProj());
+ ASSERT_TRUE(std::find(shapesBefore.begin(), shapesBefore.end(), shapeA) != shapesBefore.end());
+ ASSERT_TRUE(std::find(shapesBefore.begin(), shapesBefore.end(), shapeB) != shapesBefore.end());
+
+ // Drop {b: 1} from cache. Make sure {a: 1} is still in cache afterwards.
+ BSONObjBuilder bob;
+ OperationContextNoop txn;
+
+ ASSERT_OK(PlanCacheClear::clear(&txn, &planCache, ns, BSON("query" << cqB->getQueryObj())));
+ vector<BSONObj> shapesAfter = getShapes(planCache);
+ ASSERT_EQUALS(shapesAfter.size(), 1U);
+ ASSERT_EQUALS(shapesAfter[0], shapeA);
+}
- BSONObjBuilder ignored;
- ASSERT_OK(PlanCacheListPlans::list(&txn, planCache, ns, fromjson("{query: {a: 1}}"), &ignored));
- }
+/**
+ * Tests for planCacheListPlans
+ */
- TEST(PlanCacheCommandsTest, planCacheListPlansOnlyOneSolutionTrue) {
- // Create a canonical query
- CanonicalQuery* cqRaw;
- ASSERT_OK(CanonicalQuery::canonicalize(ns, fromjson("{a: 1}"), &cqRaw));
- unique_ptr<CanonicalQuery> cq(cqRaw);
-
- // Plan cache with one entry
- PlanCache planCache;
- QuerySolution qs;
- qs.cacheData.reset(createSolutionCacheData());
- std::vector<QuerySolution*> solns;
- solns.push_back(&qs);
- planCache.add(*cq, solns, createDecision(1U));
-
- vector<BSONObj> plans = getPlans(planCache, cq->getQueryObj(),
- cq->getParsed().getSort(), cq->getParsed().getProj());
- ASSERT_EQUALS(plans.size(), 1U);
- }
+/**
+ * Function to extract plan ID from BSON element.
+ * Validates planID during extraction.
+ * Each BSON element contains an embedded BSON object with the following layout:
+ * {
+ * plan: <plan_id>,
+ * details: <plan_details>,
+ * reason: <ranking_stats>,
+ * feedback: <execution_stats>,
+ * source: <source>
+ * }
+ * Compilation note: GCC 4.4 has issues with getPlan() declared as a function object.
+ */
+BSONObj getPlan(const BSONElement& elt) {
+ ASSERT_TRUE(elt.isABSONObj());
+ BSONObj obj = elt.Obj();
- TEST(PlanCacheCommandsTest, planCacheListPlansOnlyOneSolutionFalse) {
- // Create a canonical query
- CanonicalQuery* cqRaw;
- ASSERT_OK(CanonicalQuery::canonicalize(ns, fromjson("{a: 1}"), &cqRaw));
- unique_ptr<CanonicalQuery> cq(cqRaw);
-
- // Plan cache with one entry
- PlanCache planCache;
- QuerySolution qs;
- qs.cacheData.reset(createSolutionCacheData());
- // Add cache entry with 2 solutions.
- std::vector<QuerySolution*> solns;
- solns.push_back(&qs);
- solns.push_back(&qs);
- planCache.add(*cq, solns, createDecision(2U));
-
- vector<BSONObj> plans = getPlans(planCache, cq->getQueryObj(),
- cq->getParsed().getSort(), cq->getParsed().getProj());
- ASSERT_EQUALS(plans.size(), 2U);
- }
+ // Check required fields.
+ // details
+ BSONElement detailsElt = obj.getField("details");
+ ASSERT_TRUE(detailsElt.isABSONObj());
+
+ // reason
+ BSONElement reasonElt = obj.getField("reason");
+ ASSERT_TRUE(reasonElt.isABSONObj());
+
+ // feedback
+ BSONElement feedbackElt = obj.getField("feedback");
+ ASSERT_TRUE(feedbackElt.isABSONObj());
+
+ return obj.getOwned();
+}
+
+/**
+ * Utility function to get list of plan IDs for a query in the cache.
+ */
+vector<BSONObj> getPlans(const PlanCache& planCache,
+ const BSONObj& query,
+ const BSONObj& sort,
+ const BSONObj& projection) {
+ OperationContextNoop txn;
+
+ BSONObjBuilder bob;
+ BSONObj cmdObj = BSON("query" << query << "sort" << sort << "projection" << projection);
+ ASSERT_OK(PlanCacheListPlans::list(&txn, planCache, ns, cmdObj, &bob));
+ BSONObj resultObj = bob.obj();
+ BSONElement plansElt = resultObj.getField("plans");
+ ASSERT_EQUALS(plansElt.type(), mongo::Array);
+ vector<BSONElement> planEltArray = plansElt.Array();
+ ASSERT_FALSE(planEltArray.empty());
+ vector<BSONObj> plans(planEltArray.size());
+ std::transform(planEltArray.begin(), planEltArray.end(), plans.begin(), getPlan);
+ return plans;
+}
+
+TEST(PlanCacheCommandsTest, planCacheListPlansInvalidParameter) {
+ PlanCache planCache;
+ BSONObjBuilder ignored;
+ OperationContextNoop txn;
+
+ // Missing query field is not ok.
+ ASSERT_NOT_OK(PlanCacheListPlans::list(&txn, planCache, ns, BSONObj(), &ignored));
+ // Query field type must be BSON object.
+ ASSERT_NOT_OK(
+ PlanCacheListPlans::list(&txn, planCache, ns, fromjson("{query: 12345}"), &ignored));
+ ASSERT_NOT_OK(PlanCacheListPlans::list(
+ &txn, planCache, ns, fromjson("{query: /keyisnotregex/}"), &ignored));
+}
+
+TEST(PlanCacheCommandsTest, planCacheListPlansUnknownKey) {
+ // Leave the plan cache empty.
+ PlanCache planCache;
+ OperationContextNoop txn;
+
+ BSONObjBuilder ignored;
+ ASSERT_OK(PlanCacheListPlans::list(&txn, planCache, ns, fromjson("{query: {a: 1}}"), &ignored));
+}
+
+TEST(PlanCacheCommandsTest, planCacheListPlansOnlyOneSolutionTrue) {
+ // Create a canonical query
+ CanonicalQuery* cqRaw;
+ ASSERT_OK(CanonicalQuery::canonicalize(ns, fromjson("{a: 1}"), &cqRaw));
+ unique_ptr<CanonicalQuery> cq(cqRaw);
+
+ // Plan cache with one entry
+ PlanCache planCache;
+ QuerySolution qs;
+ qs.cacheData.reset(createSolutionCacheData());
+ std::vector<QuerySolution*> solns;
+ solns.push_back(&qs);
+ planCache.add(*cq, solns, createDecision(1U));
+
+ vector<BSONObj> plans = getPlans(
+ planCache, cq->getQueryObj(), cq->getParsed().getSort(), cq->getParsed().getProj());
+ ASSERT_EQUALS(plans.size(), 1U);
+}
+
+TEST(PlanCacheCommandsTest, planCacheListPlansOnlyOneSolutionFalse) {
+ // Create a canonical query
+ CanonicalQuery* cqRaw;
+ ASSERT_OK(CanonicalQuery::canonicalize(ns, fromjson("{a: 1}"), &cqRaw));
+ unique_ptr<CanonicalQuery> cq(cqRaw);
+
+ // Plan cache with one entry
+ PlanCache planCache;
+ QuerySolution qs;
+ qs.cacheData.reset(createSolutionCacheData());
+ // Add cache entry with 2 solutions.
+ std::vector<QuerySolution*> solns;
+ solns.push_back(&qs);
+ solns.push_back(&qs);
+ planCache.add(*cq, solns, createDecision(2U));
+
+ vector<BSONObj> plans = getPlans(
+ planCache, cq->getQueryObj(), cq->getParsed().getSort(), cq->getParsed().getProj());
+ ASSERT_EQUALS(plans.size(), 2U);
+}
} // namespace
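
For reference, a hedged sketch of consuming the planCacheListPlans output that the tests above validate; it assumes planCache already holds an entry for the shape (as in planCacheListPlansOnlyOneSolutionTrue), and the field names come from PlanCacheListPlans::list() earlier in this diff:

    BSONObjBuilder bob;
    OperationContextNoop txn;
    ASSERT_OK(PlanCacheListPlans::list(&txn, planCache, ns, fromjson("{query: {a: 1}}"), &bob));
    BSONObj res = bob.obj();
    // Each element of "plans" carries details.solution, reason.{score, stats},
    // feedback, and filterSet, as built by PlanCacheListPlans::list().
    std::vector<BSONElement> plans = res.getField("plans").Array();
    BSONObj reason = plans[0].Obj().getObjectField("reason");
    double score = reason.getField("score").numberDouble();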
diff --git a/src/mongo/db/commands/rename_collection.cpp b/src/mongo/db/commands/rename_collection.cpp
index 5479ee92a40..68c47676527 100644
--- a/src/mongo/db/commands/rename_collection.cpp
+++ b/src/mongo/db/commands/rename_collection.cpp
@@ -52,98 +52,99 @@
namespace mongo {
- using std::min;
- using std::string;
- using std::stringstream;
+using std::min;
+using std::string;
+using std::stringstream;
- class CmdRenameCollection : public Command {
- public:
- CmdRenameCollection() : Command( "renameCollection" ) {}
- virtual bool adminOnly() const {
- return true;
+class CmdRenameCollection : public Command {
+public:
+ CmdRenameCollection() : Command("renameCollection") {}
+ virtual bool adminOnly() const {
+ return true;
+ }
+ virtual bool slaveOk() const {
+ return false;
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return true;
+ }
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ return rename_collection::checkAuthForRenameCollectionCommand(client, dbname, cmdObj);
+ }
+ virtual void help(stringstream& help) const {
+ help << " example: { renameCollection: foo.a, to: bar.b }";
+ }
+
+ static void dropCollection(OperationContext* txn, Database* db, StringData collName) {
+ WriteUnitOfWork wunit(txn);
+ if (db->dropCollection(txn, collName).isOK()) {
+ // ignoring failure case
+ wunit.commit();
}
- virtual bool slaveOk() const {
+ }
+
+ virtual bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ string source = cmdObj.getStringField(name.c_str());
+ string target = cmdObj.getStringField("to");
+
+ if (!NamespaceString::validCollectionComponent(target.c_str())) {
+ errmsg = "invalid collection name: " + target;
return false;
}
- virtual bool isWriteCommandForConfigServer() const { return true; }
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- return rename_collection::checkAuthForRenameCollectionCommand(client, dbname, cmdObj);
- }
- virtual void help( stringstream &help ) const {
- help << " example: { renameCollection: foo.a, to: bar.b }";
- }
-
- static void dropCollection(OperationContext* txn, Database* db, StringData collName) {
- WriteUnitOfWork wunit(txn);
- if (db->dropCollection(txn, collName).isOK()) {
- // ignoring failure case
- wunit.commit();
- }
+ if (source.empty() || target.empty()) {
+ errmsg = "invalid command syntax";
+ return false;
}
- virtual bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- string source = cmdObj.getStringField( name.c_str() );
- string target = cmdObj.getStringField( "to" );
-
- if ( !NamespaceString::validCollectionComponent(target.c_str()) ) {
- errmsg = "invalid collection name: " + target;
+ if ((repl::getGlobalReplicationCoordinator()->getReplicationMode() !=
+ repl::ReplicationCoordinator::modeNone)) {
+ if (NamespaceString(source).isOplog()) {
+ errmsg = "can't rename live oplog while replicating";
return false;
}
- if ( source.empty() || target.empty() ) {
- errmsg = "invalid command syntax";
+ if (NamespaceString(target).isOplog()) {
+ errmsg = "can't rename to live oplog while replicating";
return false;
}
+ }
- if ((repl::getGlobalReplicationCoordinator()->getReplicationMode() !=
- repl::ReplicationCoordinator::modeNone)) {
- if (NamespaceString(source).isOplog()) {
- errmsg = "can't rename live oplog while replicating";
- return false;
- }
- if (NamespaceString(target).isOplog()) {
- errmsg = "can't rename to live oplog while replicating";
- return false;
- }
- }
-
- if (NamespaceString::oplog(source) != NamespaceString::oplog(target)) {
- errmsg =
- "If either the source or target of a rename is an oplog name, both must be";
- return false;
- }
-
- Status sourceStatus = userAllowedWriteNS(source);
- if (!sourceStatus.isOK()) {
- errmsg = "error with source namespace: " + sourceStatus.reason();
- return false;
- }
+ if (NamespaceString::oplog(source) != NamespaceString::oplog(target)) {
+ errmsg = "If either the source or target of a rename is an oplog name, both must be";
+ return false;
+ }
- Status targetStatus = userAllowedWriteNS(target);
- if (!targetStatus.isOK()) {
- errmsg = "error with target namespace: " + targetStatus.reason();
- return false;
- }
+ Status sourceStatus = userAllowedWriteNS(source);
+ if (!sourceStatus.isOK()) {
+ errmsg = "error with source namespace: " + sourceStatus.reason();
+ return false;
+ }
- if (NamespaceString(source).coll() == "system.indexes"
- || NamespaceString(target).coll() == "system.indexes") {
- errmsg = "renaming system.indexes is not allowed";
- return false;
- }
+ Status targetStatus = userAllowedWriteNS(target);
+ if (!targetStatus.isOK()) {
+ errmsg = "error with target namespace: " + targetStatus.reason();
+ return false;
+ }
- return appendCommandStatus(result,
- renameCollection(txn,
- NamespaceString(source),
- NamespaceString(target),
- cmdObj["dropTarget"].trueValue(),
- cmdObj["stayTemp"].trueValue()));
+ if (NamespaceString(source).coll() == "system.indexes" ||
+ NamespaceString(target).coll() == "system.indexes") {
+ errmsg = "renaming system.indexes is not allowed";
+ return false;
}
- } cmdrenamecollection;
-} // namespace mongo
+ return appendCommandStatus(result,
+ renameCollection(txn,
+ NamespaceString(source),
+ NamespaceString(target),
+ cmdObj["dropTarget"].trueValue(),
+ cmdObj["stayTemp"].trueValue()));
+ }
+} cmdrenamecollection;
+
+} // namespace mongo
diff --git a/src/mongo/db/commands/rename_collection.h b/src/mongo/db/commands/rename_collection.h
index f8651bccd4c..a7e3c6beed4 100644
--- a/src/mongo/db/commands/rename_collection.h
+++ b/src/mongo/db/commands/rename_collection.h
@@ -36,15 +36,13 @@
namespace mongo {
- class ClientBasic;
+class ClientBasic;
namespace rename_collection {
- Status checkAuthForRenameCollectionCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj);
-
-} // namespace rename_collection
-} // namespace mongo
-
+Status checkAuthForRenameCollectionCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj);
+} // namespace rename_collection
+} // namespace mongo
diff --git a/src/mongo/db/commands/rename_collection_common.cpp b/src/mongo/db/commands/rename_collection_common.cpp
index feec6f4f135..d9818962cc3 100644
--- a/src/mongo/db/commands/rename_collection_common.cpp
+++ b/src/mongo/db/commands/rename_collection_common.cpp
@@ -42,63 +42,61 @@
namespace mongo {
namespace rename_collection {
- Status checkAuthForRenameCollectionCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- NamespaceString sourceNS = NamespaceString(cmdObj.getStringField("renameCollection"));
- NamespaceString targetNS = NamespaceString(cmdObj.getStringField("to"));
- bool dropTarget = cmdObj["dropTarget"].trueValue();
+Status checkAuthForRenameCollectionCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ NamespaceString sourceNS = NamespaceString(cmdObj.getStringField("renameCollection"));
+ NamespaceString targetNS = NamespaceString(cmdObj.getStringField("to"));
+ bool dropTarget = cmdObj["dropTarget"].trueValue();
- if (sourceNS.db() == targetNS.db() && !sourceNS.isSystem() && !targetNS.isSystem()) {
- // If renaming within the same database, then if you have renameCollectionSameDB and
- // either can read both of source and dest collections or *can't* read either of source
- // or dest collection, then you get can do the rename, even without insert on the
- // destination collection.
- bool canRename = AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forDatabaseName(sourceNS.db()),
- ActionType::renameCollectionSameDB);
+ if (sourceNS.db() == targetNS.db() && !sourceNS.isSystem() && !targetNS.isSystem()) {
+        // If renaming within the same database: if you have renameCollectionSameDB and
+        // can either read both the source and dest collections or read *neither* of
+        // them, then you can do the rename, even without insert on the
+        // destination collection.
+ bool canRename = AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
+ ResourcePattern::forDatabaseName(sourceNS.db()), ActionType::renameCollectionSameDB);
- bool canDropTargetIfNeeded = true;
- if (dropTarget) {
- canDropTargetIfNeeded =
- AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forExactNamespace(targetNS),
- ActionType::dropCollection);
- }
-
- bool canReadSrc = AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forExactNamespace(sourceNS), ActionType::find);
- bool canReadDest = AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forExactNamespace(targetNS), ActionType::find);
-
- if (canRename && canDropTargetIfNeeded && (canReadSrc || !canReadDest)) {
- return Status::OK();
- }
+ bool canDropTargetIfNeeded = true;
+ if (dropTarget) {
+ canDropTargetIfNeeded =
+ AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
+ ResourcePattern::forExactNamespace(targetNS), ActionType::dropCollection);
}
- // Check privileges on source collection
- ActionSet actions;
- actions.addAction(ActionType::find);
- actions.addAction(ActionType::dropCollection);
- if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forExactNamespace(sourceNS), actions)) {
- return Status(ErrorCodes::Unauthorized, "Unauthorized");
- }
+ bool canReadSrc = AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
+ ResourcePattern::forExactNamespace(sourceNS), ActionType::find);
+ bool canReadDest = AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
+ ResourcePattern::forExactNamespace(targetNS), ActionType::find);
- // Check privileges on dest collection
- actions.removeAllActions();
- actions.addAction(ActionType::insert);
- actions.addAction(ActionType::createIndex);
- if (dropTarget) {
- actions.addAction(ActionType::dropCollection);
- }
- if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forExactNamespace(targetNS), actions)) {
- return Status(ErrorCodes::Unauthorized, "Unauthorized");
+ if (canRename && canDropTargetIfNeeded && (canReadSrc || !canReadDest)) {
+ return Status::OK();
}
+ }
+
+ // Check privileges on source collection
+ ActionSet actions;
+ actions.addAction(ActionType::find);
+ actions.addAction(ActionType::dropCollection);
+ if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
+ ResourcePattern::forExactNamespace(sourceNS), actions)) {
+ return Status(ErrorCodes::Unauthorized, "Unauthorized");
+ }
- return Status::OK();
+ // Check privileges on dest collection
+ actions.removeAllActions();
+ actions.addAction(ActionType::insert);
+ actions.addAction(ActionType::createIndex);
+ if (dropTarget) {
+ actions.addAction(ActionType::dropCollection);
+ }
+ if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
+ ResourcePattern::forExactNamespace(targetNS), actions)) {
+ return Status(ErrorCodes::Unauthorized, "Unauthorized");
}
-} // namespace rename_collection
-} // namespace mongo
+ return Status::OK();
+}
+
+} // namespace rename_collection
+} // namespace mongo
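
checkAuthForRenameCollectionCommand encodes two authorization paths: a same-database
shortcut gated on ActionType::renameCollectionSameDB, and a general path that demands
explicit privileges on both namespaces. Purely as an illustration of the general path
(not a drop-in implementation), the required privileges amount to:

    // Source: the caller must be able to read and drop the collection being renamed.
    ActionSet sourceActions;
    sourceActions.addAction(ActionType::find);
    sourceActions.addAction(ActionType::dropCollection);
    Privilege sourcePriv(ResourcePattern::forExactNamespace(sourceNS), sourceActions);

    // Target: the caller must be able to recreate the data and its indexes,
    // plus dropCollection when dropTarget is true.
    ActionSet targetActions;
    targetActions.addAction(ActionType::insert);
    targetActions.addAction(ActionType::createIndex);
    Privilege targetPriv(ResourcePattern::forExactNamespace(targetNS), targetActions);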
diff --git a/src/mongo/db/commands/repair_cursor.cpp b/src/mongo/db/commands/repair_cursor.cpp
index 0598b67b9c3..5cf096fc511 100644
--- a/src/mongo/db/commands/repair_cursor.cpp
+++ b/src/mongo/db/commands/repair_cursor.cpp
@@ -42,83 +42,77 @@
namespace mongo {
- using std::string;
-
- class RepairCursorCmd : public Command {
- public:
- RepairCursorCmd() : Command("repairCursor") {}
-
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual bool slaveOk() const { return true; }
-
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- ActionSet actions;
- actions.addAction(ActionType::find);
- Privilege p(parseResourcePattern(dbname, cmdObj), actions);
- if (AuthorizationSession::get(client)->isAuthorizedForPrivilege(p))
- return Status::OK();
- return Status(ErrorCodes::Unauthorized, "Unauthorized");
+using std::string;
+
+class RepairCursorCmd : public Command {
+public:
+ RepairCursorCmd() : Command("repairCursor") {}
+
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual bool slaveOk() const {
+ return true;
+ }
+
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ ActionSet actions;
+ actions.addAction(ActionType::find);
+ Privilege p(parseResourcePattern(dbname, cmdObj), actions);
+ if (AuthorizationSession::get(client)->isAuthorizedForPrivilege(p))
+ return Status::OK();
+ return Status(ErrorCodes::Unauthorized, "Unauthorized");
+ }
+
+ virtual bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ NamespaceString ns(parseNs(dbname, cmdObj));
+
+ AutoGetCollectionForRead ctx(txn, ns.ns());
+
+ Collection* collection = ctx.getCollection();
+ if (!collection) {
+ return appendCommandStatus(
+ result, Status(ErrorCodes::NamespaceNotFound, "ns does not exist: " + ns.ns()));
}
- virtual bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
-
- NamespaceString ns(parseNs(dbname, cmdObj));
-
- AutoGetCollectionForRead ctx(txn, ns.ns());
-
- Collection* collection = ctx.getCollection();
- if (!collection) {
- return appendCommandStatus(result,
- Status(ErrorCodes::NamespaceNotFound,
- "ns does not exist: " + ns.ns()));
- }
-
- auto cursor = collection->getRecordStore()->getCursorForRepair(txn);
- if (!cursor) {
- return appendCommandStatus(result,
- Status(ErrorCodes::CommandNotSupported,
- "repair iterator not supported"));
- }
-
- std::unique_ptr<WorkingSet> ws(new WorkingSet());
- std::unique_ptr<MultiIteratorStage> stage(new MultiIteratorStage(txn, ws.get(),
- collection));
- stage->addIterator(std::move(cursor));
-
- PlanExecutor* rawExec;
- Status execStatus = PlanExecutor::make(txn,
- ws.release(),
- stage.release(),
- collection,
- PlanExecutor::YIELD_AUTO,
- &rawExec);
- invariant(execStatus.isOK());
- std::unique_ptr<PlanExecutor> exec(rawExec);
-
- // 'exec' will be used in getMore(). It was automatically registered on construction
- // due to the auto yield policy, so it could yield during plan selection. We deregister
- // it now so that it can be registed with ClientCursor.
- exec->deregisterExec();
- exec->saveState();
-
- // ClientCursors' constructor inserts them into a global map that manages their
- // lifetimes. That is why the next line isn't leaky.
- ClientCursor* cc = new ClientCursor(collection->getCursorManager(),
- exec.release(),
- ns.ns());
-
- appendCursorResponseObject(cc->cursorid(), ns.ns(), BSONArray(), &result);
-
- return true;
-
+ auto cursor = collection->getRecordStore()->getCursorForRepair(txn);
+ if (!cursor) {
+ return appendCommandStatus(
+ result, Status(ErrorCodes::CommandNotSupported, "repair iterator not supported"));
}
- } repairCursorCmd;
+ std::unique_ptr<WorkingSet> ws(new WorkingSet());
+ std::unique_ptr<MultiIteratorStage> stage(
+ new MultiIteratorStage(txn, ws.get(), collection));
+ stage->addIterator(std::move(cursor));
+
+ PlanExecutor* rawExec;
+ Status execStatus = PlanExecutor::make(
+ txn, ws.release(), stage.release(), collection, PlanExecutor::YIELD_AUTO, &rawExec);
+ invariant(execStatus.isOK());
+ std::unique_ptr<PlanExecutor> exec(rawExec);
+
+ // 'exec' will be used in getMore(). It was automatically registered on construction
+ // due to the auto yield policy, so it could yield during plan selection. We deregister
+        // it now so that it can be registered with ClientCursor.
+ exec->deregisterExec();
+ exec->saveState();
+
+ // ClientCursors' constructor inserts them into a global map that manages their
+ // lifetimes. That is why the next line isn't leaky.
+ ClientCursor* cc =
+ new ClientCursor(collection->getCursorManager(), exec.release(), ns.ns());
+
+ appendCursorResponseObject(cc->cursorid(), ns.ns(), BSONArray(), &result);
+
+ return true;
+ }
+} repairCursorCmd;
}
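
Note that run() registers the cursor with an empty first batch: the records salvaged by
the repair iterator are only returned by subsequent getMore calls against the cursor id.
A sketch of the invocation and the reply shape produced by appendCursorResponseObject
(collection name hypothetical, field names assumed from this tree's cursor-response helpers):

    BSONObj cmd = BSON("repairCursor" << "damagedColl");
    // Expected reply, assuming the storage engine supports repair iteration:
    // { cursor: { id: <cursorid>, ns: "test.damagedColl", firstBatch: [] }, ok: 1 }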
diff --git a/src/mongo/db/commands/server_status.cpp b/src/mongo/db/commands/server_status.cpp
index d2bf917f6c4..d7b3324efe6 100644
--- a/src/mongo/db/commands/server_status.cpp
+++ b/src/mongo/db/commands/server_status.cpp
@@ -54,273 +54,269 @@
namespace mongo {
- using std::endl;
- using std::map;
- using std::string;
- using std::stringstream;
-
- class CmdServerStatus : public Command {
- public:
-
- CmdServerStatus()
- : Command("serverStatus", true),
- _started( curTimeMillis64() ),
- _runCalled( false ) {
- }
-
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual bool slaveOk() const { return true; }
+using std::endl;
+using std::map;
+using std::string;
+using std::stringstream;
+
+class CmdServerStatus : public Command {
+public:
+ CmdServerStatus()
+ : Command("serverStatus", true), _started(curTimeMillis64()), _runCalled(false) {}
+
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual bool slaveOk() const {
+ return true;
+ }
- virtual void help( stringstream& help ) const {
- help << "returns lots of administrative server statistics";
- }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::serverStatus);
- out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
- }
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
-
- _runCalled = true;
-
- long long start = Listener::getElapsedTimeMillis();
- BSONObjBuilder timeBuilder(256);
-
- const auto authSession = AuthorizationSession::get(ClientBasic::getCurrent());
-
- // --- basic fields that are global
-
- result.append("host", prettyHostName() );
- result.append("version", versionString);
- result.append("process", serverGlobalParams.binaryName);
- result.append("pid", ProcessId::getCurrent().asLongLong());
- result.append("uptime", (double) (time(0) - serverGlobalParams.started));
- result.append("uptimeMillis", (long long)(curTimeMillis64()-_started));
- result.append("uptimeEstimate",(double) (start/1000));
- result.appendDate( "localTime" , jsTime() );
-
- timeBuilder.appendNumber( "after basic" , Listener::getElapsedTimeMillis() - start );
-
- // --- all sections
-
- for ( SectionMap::const_iterator i = _sections->begin(); i != _sections->end(); ++i ) {
- ServerStatusSection* section = i->second;
-
- std::vector<Privilege> requiredPrivileges;
- section->addRequiredPrivileges(&requiredPrivileges);
- if (!authSession->isAuthorizedForPrivileges(requiredPrivileges))
- continue;
-
- bool include = section->includeByDefault();
-
- BSONElement e = cmdObj[section->getSectionName()];
- if ( e.type() ) {
- include = e.trueValue();
- }
-
- if ( ! include )
- continue;
-
- BSONObj data = section->generateSection(txn, e);
- if ( data.isEmpty() )
- continue;
-
- result.append( section->getSectionName(), data );
- timeBuilder.appendNumber( static_cast<string>(str::stream() << "after " << section->getSectionName()),
- Listener::getElapsedTimeMillis() - start );
- }
+ virtual void help(stringstream& help) const {
+ help << "returns lots of administrative server statistics";
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::serverStatus);
+ out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
+ }
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ _runCalled = true;
- // --- counters
- bool includeMetricTree = MetricTree::theMetricTree != NULL;
- if ( cmdObj["metrics"].type() && !cmdObj["metrics"].trueValue() )
- includeMetricTree = false;
+ long long start = Listener::getElapsedTimeMillis();
+ BSONObjBuilder timeBuilder(256);
- if ( includeMetricTree ) {
- MetricTree::theMetricTree->appendTo( result );
- }
+ const auto authSession = AuthorizationSession::get(ClientBasic::getCurrent());
- // --- some hard coded global things hard to pull out
+ // --- basic fields that are global
- {
- RamLog::LineIterator rl(RamLog::get("warnings"));
- if (rl.lastWrite() >= time(0)-(10*60)){ // only show warnings from last 10 minutes
- BSONArrayBuilder arr(result.subarrayStart("warnings"));
- while (rl.more()) {
- arr.append(rl.next());
- }
- arr.done();
- }
- }
+ result.append("host", prettyHostName());
+ result.append("version", versionString);
+ result.append("process", serverGlobalParams.binaryName);
+ result.append("pid", ProcessId::getCurrent().asLongLong());
+ result.append("uptime", (double)(time(0) - serverGlobalParams.started));
+ result.append("uptimeMillis", (long long)(curTimeMillis64() - _started));
+ result.append("uptimeEstimate", (double)(start / 1000));
+ result.appendDate("localTime", jsTime());
+
+ timeBuilder.appendNumber("after basic", Listener::getElapsedTimeMillis() - start);
+
+ // --- all sections
+
+ for (SectionMap::const_iterator i = _sections->begin(); i != _sections->end(); ++i) {
+ ServerStatusSection* section = i->second;
+
+ std::vector<Privilege> requiredPrivileges;
+ section->addRequiredPrivileges(&requiredPrivileges);
+ if (!authSession->isAuthorizedForPrivileges(requiredPrivileges))
+ continue;
+
+ bool include = section->includeByDefault();
- timeBuilder.appendNumber( "at end" , Listener::getElapsedTimeMillis() - start );
- if ( Listener::getElapsedTimeMillis() - start > 1000 ) {
- BSONObj t = timeBuilder.obj();
- log() << "serverStatus was very slow: " << t << endl;
- result.append( "timing" , t );
+ BSONElement e = cmdObj[section->getSectionName()];
+ if (e.type()) {
+ include = e.trueValue();
}
- return true;
+ if (!include)
+ continue;
+
+ BSONObj data = section->generateSection(txn, e);
+ if (data.isEmpty())
+ continue;
+
+ result.append(section->getSectionName(), data);
+ timeBuilder.appendNumber(
+ static_cast<string>(str::stream() << "after " << section->getSectionName()),
+ Listener::getElapsedTimeMillis() - start);
+ }
+
+ // --- counters
+ bool includeMetricTree = MetricTree::theMetricTree != NULL;
+ if (cmdObj["metrics"].type() && !cmdObj["metrics"].trueValue())
+ includeMetricTree = false;
+
+ if (includeMetricTree) {
+ MetricTree::theMetricTree->appendTo(result);
}
- void addSection( ServerStatusSection* section ) {
- verify( ! _runCalled );
- if ( _sections == 0 ) {
- _sections = new SectionMap();
+    // --- some hard-coded global things that are hard to pull out
+
+ {
+ RamLog::LineIterator rl(RamLog::get("warnings"));
+ if (rl.lastWrite() >= time(0) - (10 * 60)) { // only show warnings from last 10 minutes
+ BSONArrayBuilder arr(result.subarrayStart("warnings"));
+ while (rl.more()) {
+ arr.append(rl.next());
+ }
+ arr.done();
}
- (*_sections)[section->getSectionName()] = section;
}
- private:
- const unsigned long long _started;
- bool _runCalled;
+ timeBuilder.appendNumber("at end", Listener::getElapsedTimeMillis() - start);
+ if (Listener::getElapsedTimeMillis() - start > 1000) {
+ BSONObj t = timeBuilder.obj();
+ log() << "serverStatus was very slow: " << t << endl;
+ result.append("timing", t);
+ }
+
+ return true;
+ }
+
+ void addSection(ServerStatusSection* section) {
+ verify(!_runCalled);
+ if (_sections == 0) {
+ _sections = new SectionMap();
+ }
+ (*_sections)[section->getSectionName()] = section;
+ }
+
+private:
+ const unsigned long long _started;
+ bool _runCalled;
+
+ typedef map<string, ServerStatusSection*> SectionMap;
+ static SectionMap* _sections;
+} cmdServerStatus;
+
+
+CmdServerStatus::SectionMap* CmdServerStatus::_sections = 0;
- typedef map< string , ServerStatusSection* > SectionMap;
- static SectionMap* _sections;
- } cmdServerStatus;
+ServerStatusSection::ServerStatusSection(const string& sectionName) : _sectionName(sectionName) {
+ cmdServerStatus.addSection(this);
+}
+
+OpCounterServerStatusSection::OpCounterServerStatusSection(const string& sectionName,
+ OpCounters* counters)
+ : ServerStatusSection(sectionName), _counters(counters) {}
+
+BSONObj OpCounterServerStatusSection::generateSection(OperationContext* txn,
+ const BSONElement& configElement) const {
+ return _counters->getObj();
+}
+
+OpCounterServerStatusSection globalOpCounterServerStatusSection("opcounters", &globalOpCounters);
- CmdServerStatus::SectionMap* CmdServerStatus::_sections = 0;
+namespace {
- ServerStatusSection::ServerStatusSection( const string& sectionName )
- : _sectionName( sectionName ) {
- cmdServerStatus.addSection( this );
+// some universal sections
+
+class Connections : public ServerStatusSection {
+public:
+ Connections() : ServerStatusSection("connections") {}
+ virtual bool includeByDefault() const {
+ return true;
}
- OpCounterServerStatusSection::OpCounterServerStatusSection( const string& sectionName, OpCounters* counters )
- : ServerStatusSection( sectionName ), _counters( counters ){
+ BSONObj generateSection(OperationContext* txn, const BSONElement& configElement) const {
+ BSONObjBuilder bb;
+ bb.append("current", Listener::globalTicketHolder.used());
+ bb.append("available", Listener::globalTicketHolder.available());
+ bb.append("totalCreated", Listener::globalConnectionNumber.load());
+ return bb.obj();
}
- BSONObj OpCounterServerStatusSection::generateSection(OperationContext* txn,
- const BSONElement& configElement) const {
- return _counters->getObj();
+} connections;
+
+class ExtraInfo : public ServerStatusSection {
+public:
+ ExtraInfo() : ServerStatusSection("extra_info") {}
+ virtual bool includeByDefault() const {
+ return true;
}
-
- OpCounterServerStatusSection globalOpCounterServerStatusSection( "opcounters", &globalOpCounters );
-
-
- namespace {
-
- // some universal sections
-
- class Connections : public ServerStatusSection {
- public:
- Connections() : ServerStatusSection( "connections" ){}
- virtual bool includeByDefault() const { return true; }
-
- BSONObj generateSection(OperationContext* txn,
- const BSONElement& configElement) const {
-
- BSONObjBuilder bb;
- bb.append( "current" , Listener::globalTicketHolder.used() );
- bb.append( "available" , Listener::globalTicketHolder.available() );
- bb.append( "totalCreated" , Listener::globalConnectionNumber.load() );
- return bb.obj();
- }
- } connections;
-
- class ExtraInfo : public ServerStatusSection {
- public:
- ExtraInfo() : ServerStatusSection( "extra_info" ){}
- virtual bool includeByDefault() const { return true; }
-
- BSONObj generateSection(OperationContext* txn,
- const BSONElement& configElement) const {
-
- BSONObjBuilder bb;
-
- bb.append("note", "fields vary by platform");
- ProcessInfo p;
- p.getExtraInfo(bb);
-
- return bb.obj();
- }
+ BSONObj generateSection(OperationContext* txn, const BSONElement& configElement) const {
+ BSONObjBuilder bb;
- } extraInfo;
+ bb.append("note", "fields vary by platform");
+ ProcessInfo p;
+ p.getExtraInfo(bb);
+ return bb.obj();
+ }
- class Asserts : public ServerStatusSection {
- public:
- Asserts() : ServerStatusSection( "asserts" ){}
- virtual bool includeByDefault() const { return true; }
-
- BSONObj generateSection(OperationContext* txn,
- const BSONElement& configElement) const {
+} extraInfo;
- BSONObjBuilder asserts;
- asserts.append( "regular" , assertionCount.regular );
- asserts.append( "warning" , assertionCount.warning );
- asserts.append( "msg" , assertionCount.msg );
- asserts.append( "user" , assertionCount.user );
- asserts.append( "rollovers" , assertionCount.rollovers );
- return asserts.obj();
- }
-
- } asserts;
+class Asserts : public ServerStatusSection {
+public:
+ Asserts() : ServerStatusSection("asserts") {}
+ virtual bool includeByDefault() const {
+ return true;
+ }
- class Network : public ServerStatusSection {
- public:
- Network() : ServerStatusSection( "network" ){}
- virtual bool includeByDefault() const { return true; }
-
- BSONObj generateSection(OperationContext* txn,
- const BSONElement& configElement) const {
+ BSONObj generateSection(OperationContext* txn, const BSONElement& configElement) const {
+ BSONObjBuilder asserts;
+ asserts.append("regular", assertionCount.regular);
+ asserts.append("warning", assertionCount.warning);
+ asserts.append("msg", assertionCount.msg);
+ asserts.append("user", assertionCount.user);
+ asserts.append("rollovers", assertionCount.rollovers);
+ return asserts.obj();
+ }
- BSONObjBuilder b;
- networkCounter.append( b );
- return b.obj();
- }
-
- } network;
+} asserts;
-#ifdef MONGO_CONFIG_SSL
- class Security : public ServerStatusSection {
- public:
- Security() : ServerStatusSection( "security" ) {}
- virtual bool includeByDefault() const { return true; }
-
- BSONObj generateSection(OperationContext* txn,
- const BSONElement& configElement) const {
- BSONObj result;
- if (getSSLManager()) {
- result = getSSLManager()->getSSLConfiguration().getServerStatusBSON();
- }
- return result;
- }
- } security;
-#endif
+class Network : public ServerStatusSection {
+public:
+ Network() : ServerStatusSection("network") {}
+ virtual bool includeByDefault() const {
+ return true;
+ }
- class MemBase : public ServerStatusMetric {
- public:
- MemBase() : ServerStatusMetric(".mem.bits") {}
- virtual void appendAtLeaf( BSONObjBuilder& b ) const {
- b.append( "bits", sizeof(int*) == 4 ? 32 : 64 );
-
- ProcessInfo p;
- int v = 0;
- if ( p.supported() ) {
- b.appendNumber( "resident" , p.getResidentSize() );
- v = p.getVirtualMemorySize();
- b.appendNumber( "virtual" , v );
- b.appendBool( "supported" , true );
- }
- else {
- b.append( "note" , "not all mem info support on this platform" );
- b.appendBool( "supported" , false );
- }
+ BSONObj generateSection(OperationContext* txn, const BSONElement& configElement) const {
+ BSONObjBuilder b;
+ networkCounter.append(b);
+ return b.obj();
+ }
- }
- } memBase;
+} network;
+
+#ifdef MONGO_CONFIG_SSL
+class Security : public ServerStatusSection {
+public:
+ Security() : ServerStatusSection("security") {}
+ virtual bool includeByDefault() const {
+ return true;
}
-}
+ BSONObj generateSection(OperationContext* txn, const BSONElement& configElement) const {
+ BSONObj result;
+ if (getSSLManager()) {
+ result = getSSLManager()->getSSLConfiguration().getServerStatusBSON();
+ }
+
+ return result;
+ }
+} security;
+#endif
+class MemBase : public ServerStatusMetric {
+public:
+ MemBase() : ServerStatusMetric(".mem.bits") {}
+ virtual void appendAtLeaf(BSONObjBuilder& b) const {
+ b.append("bits", sizeof(int*) == 4 ? 32 : 64);
+
+ ProcessInfo p;
+ int v = 0;
+ if (p.supported()) {
+ b.appendNumber("resident", p.getResidentSize());
+ v = p.getVirtualMemorySize();
+ b.appendNumber("virtual", v);
+ b.appendBool("supported", true);
+ } else {
+ b.append("note", "not all mem info support on this platform");
+ b.appendBool("supported", false);
+ }
+ }
+} memBase;
+}
+}
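
As the run() loop above shows, every registered section can be toggled per invocation: a
truthy element named after the section forces it in, a falsy one suppresses it, and
includeByDefault() decides the rest; the "metrics" element similarly gates the metric
tree. A sketch of a command document exercising both knobs:

    // serverStatus without the metrics tree, with the extra_info section forced on.
    BSONObj cmd = BSON("serverStatus" << 1 << "metrics" << 0 << "extra_info" << 1);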
diff --git a/src/mongo/db/commands/server_status.h b/src/mongo/db/commands/server_status.h
index 695fac9b8e1..1ebe57280d7 100644
--- a/src/mongo/db/commands/server_status.h
+++ b/src/mongo/db/commands/server_status.h
@@ -38,60 +38,61 @@
namespace mongo {
- class ServerStatusSection {
- public:
- ServerStatusSection( const std::string& sectionName );
- virtual ~ServerStatusSection(){}
+class ServerStatusSection {
+public:
+ ServerStatusSection(const std::string& sectionName);
+ virtual ~ServerStatusSection() {}
- const std::string& getSectionName() const { return _sectionName; }
+ const std::string& getSectionName() const {
+ return _sectionName;
+ }
- /**
- * if this returns true, if the user doesn't mention this section
- * it will be included in the result
- * if they do : 1, it will be included
- * if they do : 0, it will not
- *
- * examples (section 'foo')
- * includeByDefault returning true
- * foo : 0 = not included
- * foo : 1 = included
- * foo missing = included
- * includeByDefault returning false
- * foo : 0 = not included
- * foo : 1 = included
- * foo missing = false
- */
- virtual bool includeByDefault() const = 0;
-
- /**
- * Adds the privileges that are required to view this section
- * TODO: Remove this empty default implementation and implement for every section.
- */
- virtual void addRequiredPrivileges(std::vector<Privilege>* out) {};
+ /**
+     * If this returns true and the user doesn't mention this section,
+     * it will be included in the result;
+     * if they pass <section> : 1, it will be included,
+     * and if they pass <section> : 0, it will not be.
+     *
+     * examples (section 'foo')
+     *  includeByDefault returning true
+     *     foo : 0 = not included
+     *     foo : 1 = included
+     *     foo missing = included
+     *  includeByDefault returning false
+     *     foo : 0 = not included
+     *     foo : 1 = included
+     *     foo missing = not included
+ */
+ virtual bool includeByDefault() const = 0;
- /**
- * actually generate the result
- * @param configElement the element from the actual command related to this section
- * so if the section is 'foo', this is cmdObj['foo']
- */
- virtual BSONObj generateSection(OperationContext* txn,
- const BSONElement& configElement) const = 0;
+ /**
+ * Adds the privileges that are required to view this section
+ * TODO: Remove this empty default implementation and implement for every section.
+ */
+ virtual void addRequiredPrivileges(std::vector<Privilege>* out){};
- private:
- const std::string _sectionName;
- };
+ /**
+ * actually generate the result
+ * @param configElement the element from the actual command related to this section
+ * so if the section is 'foo', this is cmdObj['foo']
+ */
+ virtual BSONObj generateSection(OperationContext* txn,
+ const BSONElement& configElement) const = 0;
- class OpCounterServerStatusSection : public ServerStatusSection {
- public:
- OpCounterServerStatusSection( const std::string& sectionName, OpCounters* counters );
- virtual bool includeByDefault() const { return true; }
-
- virtual BSONObj generateSection(OperationContext* txn,
- const BSONElement& configElement) const;
+private:
+ const std::string _sectionName;
+};
- private:
- const OpCounters* _counters;
- };
+class OpCounterServerStatusSection : public ServerStatusSection {
+public:
+ OpCounterServerStatusSection(const std::string& sectionName, OpCounters* counters);
+ virtual bool includeByDefault() const {
+ return true;
+ }
-}
+ virtual BSONObj generateSection(OperationContext* txn, const BSONElement& configElement) const;
+private:
+ const OpCounters* _counters;
+};
+}
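
A new section only needs to subclass ServerStatusSection; the base-class constructor
(see server_status.cpp above) registers it with cmdServerStatus as a side effect of
construction. A minimal sketch of an opt-in section, with a hypothetical name and payload:

    class ExampleSection : public ServerStatusSection {
    public:
        ExampleSection() : ServerStatusSection("exampleSection") {}
        virtual bool includeByDefault() const {
            return false;  // reported only when the client passes exampleSection: 1
        }
        virtual BSONObj generateSection(OperationContext* txn,
                                        const BSONElement& configElement) const {
            BSONObjBuilder bb;
            bb.append("note", "illustrative payload only");
            return bb.obj();
        }
    } exampleSection;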
diff --git a/src/mongo/db/commands/server_status_internal.cpp b/src/mongo/db/commands/server_status_internal.cpp
index e329a3724a4..bb564a9bcb6 100644
--- a/src/mongo/db/commands/server_status_internal.cpp
+++ b/src/mongo/db/commands/server_status_internal.cpp
@@ -37,53 +37,53 @@
namespace mongo {
- using std::cerr;
- using std::endl;
- using std::map;
- using std::string;
+using std::cerr;
+using std::endl;
+using std::map;
+using std::string;
- using namespace mongoutils;
+using namespace mongoutils;
- MetricTree* MetricTree::theMetricTree = NULL;
+MetricTree* MetricTree::theMetricTree = NULL;
- void MetricTree::add( ServerStatusMetric* metric ) {
- string name = metric->getMetricName();
- if ( name[0] == '.' )
- _add( name.substr(1), metric );
- else
- _add( str::stream() << "metrics." << name, metric );
- }
-
- void MetricTree::_add( const string& path, ServerStatusMetric* metric ) {
- size_t idx = path.find( "." );
- if ( idx == string::npos ) {
- _metrics[path] = metric;
- return;
- }
+void MetricTree::add(ServerStatusMetric* metric) {
+ string name = metric->getMetricName();
+ if (name[0] == '.')
+ _add(name.substr(1), metric);
+ else
+ _add(str::stream() << "metrics." << name, metric);
+}
- string myLevel = path.substr( 0, idx );
- if ( _metrics.count( myLevel ) > 0 ) {
- cerr << "metric conflict on: " << myLevel << endl;
- fassertFailed( 16461 );
- }
+void MetricTree::_add(const string& path, ServerStatusMetric* metric) {
+ size_t idx = path.find(".");
+ if (idx == string::npos) {
+ _metrics[path] = metric;
+ return;
+ }
- MetricTree*& sub = _subtrees[myLevel];
- if ( ! sub )
- sub = new MetricTree();
- sub->_add( path.substr( idx + 1 ), metric );
+ string myLevel = path.substr(0, idx);
+ if (_metrics.count(myLevel) > 0) {
+ cerr << "metric conflict on: " << myLevel << endl;
+ fassertFailed(16461);
}
- void MetricTree::appendTo( BSONObjBuilder& b ) const {
- for ( map<string,ServerStatusMetric*>::const_iterator i = _metrics.begin(); i != _metrics.end(); ++i ) {
- i->second->appendAtLeaf( b );
- }
+ MetricTree*& sub = _subtrees[myLevel];
+ if (!sub)
+ sub = new MetricTree();
+ sub->_add(path.substr(idx + 1), metric);
+}
- for ( map<string,MetricTree*>::const_iterator i = _subtrees.begin(); i != _subtrees.end(); ++i ) {
- BSONObjBuilder bb( b.subobjStart( i->first ) );
- i->second->appendTo( bb );
- bb.done();
- }
+void MetricTree::appendTo(BSONObjBuilder& b) const {
+ for (map<string, ServerStatusMetric*>::const_iterator i = _metrics.begin(); i != _metrics.end();
+ ++i) {
+ i->second->appendAtLeaf(b);
}
+ for (map<string, MetricTree*>::const_iterator i = _subtrees.begin(); i != _subtrees.end();
+ ++i) {
+ BSONObjBuilder bb(b.subobjStart(i->first));
+ i->second->appendTo(bb);
+ bb.done();
+ }
+}
}
-
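
MetricTree::add decides placement from the metric's name: a leading '.' roots the path at
the top of the serverStatus document, while any other name lands under "metrics". A sketch
of a custom leaf metric in the style of MemBase from server_status.cpp (name and value are
hypothetical):

    class QueueDepthMetric : public ServerStatusMetric {
    public:
        // No leading '.', so this surfaces at serverStatus().metrics.queue.depth.
        QueueDepthMetric() : ServerStatusMetric("queue.depth") {}
        virtual void appendAtLeaf(BSONObjBuilder& b) const {
            b.append(_leafName, 0);  // illustrative value
        }
    } queueDepthMetric;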
diff --git a/src/mongo/db/commands/server_status_internal.h b/src/mongo/db/commands/server_status_internal.h
index 37e3bbf3439..6f5e15d5d33 100644
--- a/src/mongo/db/commands/server_status_internal.h
+++ b/src/mongo/db/commands/server_status_internal.h
@@ -37,21 +37,20 @@
namespace mongo {
- class ServerStatusMetric;
+class ServerStatusMetric;
- class MetricTree {
- public:
- void add( ServerStatusMetric* metric );
+class MetricTree {
+public:
+ void add(ServerStatusMetric* metric);
- void appendTo( BSONObjBuilder& b ) const;
+ void appendTo(BSONObjBuilder& b) const;
- static MetricTree* theMetricTree;
- private:
+ static MetricTree* theMetricTree;
- void _add( const std::string& path, ServerStatusMetric* metric );
-
- std::map<std::string, MetricTree*> _subtrees;
- std::map<std::string, ServerStatusMetric*> _metrics;
- };
+private:
+ void _add(const std::string& path, ServerStatusMetric* metric);
+ std::map<std::string, MetricTree*> _subtrees;
+ std::map<std::string, ServerStatusMetric*> _metrics;
+};
}
diff --git a/src/mongo/db/commands/server_status_metric.cpp b/src/mongo/db/commands/server_status_metric.cpp
index 1e999635751..999205b9704 100644
--- a/src/mongo/db/commands/server_status_metric.cpp
+++ b/src/mongo/db/commands/server_status_metric.cpp
@@ -34,25 +34,20 @@
namespace mongo {
- using std::string;
+using std::string;
- ServerStatusMetric::ServerStatusMetric(const string& nameIn)
- : _name( nameIn ),
- _leafName( _parseLeafName( nameIn ) ) {
-
- if ( MetricTree::theMetricTree == 0 )
- MetricTree::theMetricTree = new MetricTree();
- MetricTree::theMetricTree->add( this );
- }
-
- string ServerStatusMetric::_parseLeafName( const string& name ) {
- size_t idx = name.rfind( "." );
- if ( idx == string::npos )
- return name;
-
- return name.substr( idx + 1 );
- }
+ServerStatusMetric::ServerStatusMetric(const string& nameIn)
+ : _name(nameIn), _leafName(_parseLeafName(nameIn)) {
+ if (MetricTree::theMetricTree == 0)
+ MetricTree::theMetricTree = new MetricTree();
+ MetricTree::theMetricTree->add(this);
+}
+string ServerStatusMetric::_parseLeafName(const string& name) {
+ size_t idx = name.rfind(".");
+ if (idx == string::npos)
+ return name;
+ return name.substr(idx + 1);
+}
}
-
diff --git a/src/mongo/db/commands/server_status_metric.h b/src/mongo/db/commands/server_status_metric.h
index 239c66fa96b..83fc5eff452 100644
--- a/src/mongo/db/commands/server_status_metric.h
+++ b/src/mongo/db/commands/server_status_metric.h
@@ -36,56 +36,57 @@
namespace mongo {
- class ServerStatusMetric {
- public:
- /**
- * @param name is a dotted path of a counter name
- * if name starts with . its treated as a path from the serverStatus root
- * otherwise it will live under the "counters" namespace
- * so foo.bar would be serverStatus().counters.foo.bar
- */
- ServerStatusMetric(const std::string& name);
- virtual ~ServerStatusMetric(){}
-
- std::string getMetricName() const { return _name; }
-
- virtual void appendAtLeaf( BSONObjBuilder& b ) const = 0;
-
- protected:
- static std::string _parseLeafName( const std::string& name );
-
- const std::string _name;
- const std::string _leafName;
- };
-
+class ServerStatusMetric {
+public:
/**
- * usage
- *
- * declared once
- * Counter counter;
- * ServerStatusMetricField myAwesomeCounterDisplay( "path.to.counter", &counter );
- *
- * call
- * counter.hit();
- *
- * will show up in db.serverStatus().metrics.path.to.counter
+     * @param name is a dotted path of a counter name
+     * if name starts with . it is treated as a path from the serverStatus root
+     * otherwise it will live under the "metrics" namespace
+     * so foo.bar would be serverStatus().metrics.foo.bar
*/
- template< typename T >
- class ServerStatusMetricField : public ServerStatusMetric {
- public:
- ServerStatusMetricField( const std::string& name, const T* t )
- : ServerStatusMetric(name), _t(t) {
- }
+ ServerStatusMetric(const std::string& name);
+ virtual ~ServerStatusMetric() {}
- const T* get() { return _t; }
+ std::string getMetricName() const {
+ return _name;
+ }
- virtual void appendAtLeaf( BSONObjBuilder& b ) const {
- b.append( _leafName, *_t );
- }
+ virtual void appendAtLeaf(BSONObjBuilder& b) const = 0;
- private:
- const T* _t;
- };
+protected:
+ static std::string _parseLeafName(const std::string& name);
-}
+ const std::string _name;
+ const std::string _leafName;
+};
+/**
+ * usage
+ *
+ * declared once
+ * Counter counter;
+ * ServerStatusMetricField myAwesomeCounterDisplay( "path.to.counter", &counter );
+ *
+ * call
+ * counter.hit();
+ *
+ * will show up in db.serverStatus().metrics.path.to.counter
+ */
+template <typename T>
+class ServerStatusMetricField : public ServerStatusMetric {
+public:
+ ServerStatusMetricField(const std::string& name, const T* t)
+ : ServerStatusMetric(name), _t(t) {}
+
+ const T* get() {
+ return _t;
+ }
+
+ virtual void appendAtLeaf(BSONObjBuilder& b) const {
+ b.append(_leafName, *_t);
+ }
+
+private:
+ const T* _t;
+};
+}
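
For plain counters the ServerStatusMetricField template above removes the subclass
boilerplate, as its usage comment describes. A concrete sketch with hypothetical names:

    long long slowQueries = 0;
    // Shows up as db.serverStatus().metrics.queries.slow
    ServerStatusMetricField<long long> slowQueriesDisplay("queries.slow", &slowQueries);
    // ...elsewhere, on the code path being measured:  ++slowQueries;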
diff --git a/src/mongo/db/commands/shutdown.h b/src/mongo/db/commands/shutdown.h
index 0adef08311d..c184c22aa4f 100644
--- a/src/mongo/db/commands/shutdown.h
+++ b/src/mongo/db/commands/shutdown.h
@@ -35,24 +35,31 @@
namespace mongo {
- class CmdShutdown : public Command {
- public:
- CmdShutdown() : Command("shutdown") { }
+class CmdShutdown : public Command {
+public:
+ CmdShutdown() : Command("shutdown") {}
- virtual bool requiresAuth() { return true; }
- virtual bool adminOnly() const { return true; }
- virtual bool localHostOnlyIfNoAuth(const BSONObj& cmdObj) { return true; }
- virtual bool slaveOk() const {
- return true;
- }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out);
- virtual bool isWriteCommandForConfigServer() const { return false; }
+ virtual bool requiresAuth() {
+ return true;
+ }
+ virtual bool adminOnly() const {
+ return true;
+ }
+ virtual bool localHostOnlyIfNoAuth(const BSONObj& cmdObj) {
+ return true;
+ }
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out);
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
- protected:
- static void shutdownHelper();
- };
+protected:
+ static void shutdownHelper();
+};
} // namespace mongo
-
diff --git a/src/mongo/db/commands/test_commands.cpp b/src/mongo/db/commands/test_commands.cpp
index 1c4c9d7a508..733b3b19cee 100644
--- a/src/mongo/db/commands/test_commands.cpp
+++ b/src/mongo/db/commands/test_commands.cpp
@@ -49,193 +49,205 @@
namespace mongo {
- using std::endl;
- using std::string;
- using std::stringstream;
-
- /* For testing only, not for general use. Enabled via command-line */
- class GodInsert : public Command {
- public:
- GodInsert() : Command( "godinsert" ) { }
- virtual bool adminOnly() const { return false; }
- virtual bool slaveOk() const { return true; }
- virtual bool isWriteCommandForConfigServer() const { return false; }
- // No auth needed because it only works when enabled via command line.
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {}
- virtual void help( stringstream &help ) const {
- help << "internal. for testing only.";
- }
- virtual bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- string coll = cmdObj[ "godinsert" ].valuestrsafe();
- log() << "test only command godinsert invoked coll:" << coll << endl;
- uassert( 13049, "godinsert must specify a collection", !coll.empty() );
- string ns = dbname + "." + coll;
- BSONObj obj = cmdObj[ "obj" ].embeddedObjectUserCheck();
-
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock lk(txn->lockState(), dbname, MODE_X);
- OldClientContext ctx(txn, ns );
- Database* db = ctx.db();
-
- WriteUnitOfWork wunit(txn);
- txn->setReplicatedWrites(false);
- Collection* collection = db->getCollection( ns );
- if ( !collection ) {
- collection = db->createCollection( txn, ns );
- if ( !collection ) {
- errmsg = "could not create collection";
- return false;
- }
- }
- StatusWith<RecordId> res = collection->insertDocument( txn, obj, false );
- Status status = res.getStatus();
- if (status.isOK()) {
- wunit.commit();
+using std::endl;
+using std::string;
+using std::stringstream;
+
+/* For testing only, not for general use. Enabled via command-line */
+class GodInsert : public Command {
+public:
+ GodInsert() : Command("godinsert") {}
+ virtual bool adminOnly() const {
+ return false;
+ }
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ // No auth needed because it only works when enabled via command line.
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {}
+ virtual void help(stringstream& help) const {
+ help << "internal. for testing only.";
+ }
+ virtual bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ string coll = cmdObj["godinsert"].valuestrsafe();
+ log() << "test only command godinsert invoked coll:" << coll << endl;
+ uassert(13049, "godinsert must specify a collection", !coll.empty());
+ string ns = dbname + "." + coll;
+ BSONObj obj = cmdObj["obj"].embeddedObjectUserCheck();
+
+ ScopedTransaction transaction(txn, MODE_IX);
+ Lock::DBLock lk(txn->lockState(), dbname, MODE_X);
+ OldClientContext ctx(txn, ns);
+ Database* db = ctx.db();
+
+ WriteUnitOfWork wunit(txn);
+ txn->setReplicatedWrites(false);
+ Collection* collection = db->getCollection(ns);
+ if (!collection) {
+ collection = db->createCollection(txn, ns);
+ if (!collection) {
+ errmsg = "could not create collection";
+ return false;
}
- return appendCommandStatus( result, res.getStatus() );
}
- };
-
- /* for diagnostic / testing purposes. Enabled via command line. */
- class CmdSleep : public Command {
- public:
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual bool adminOnly() const { return true; }
- virtual bool slaveOk() const { return true; }
- virtual void help( stringstream& help ) const {
- help << "internal testing command. Makes db block (in a read lock) for 100 seconds\n";
- help << "w:true write lock. secs:<seconds>";
+ StatusWith<RecordId> res = collection->insertDocument(txn, obj, false);
+ Status status = res.getStatus();
+ if (status.isOK()) {
+ wunit.commit();
}
- // No auth needed because it only works when enabled via command line.
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {}
- CmdSleep() : Command("sleep") { }
- bool run(OperationContext* txn,
- const string& ns,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- log() << "test only command sleep invoked" << endl;
- long long millis = 10 * 1000;
-
- if (cmdObj["secs"].isNumber() && cmdObj["millis"].isNumber()) {
- millis = cmdObj["secs"].numberLong() * 1000 + cmdObj["millis"].numberLong();
- }
- else if (cmdObj["secs"].isNumber()) {
- millis = cmdObj["secs"].numberLong() * 1000;
- }
- else if (cmdObj["millis"].isNumber()) {
- millis = cmdObj["millis"].numberLong();
- }
-
- if(cmdObj.getBoolField("w")) {
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite lk(txn->lockState());
- sleepmillis(millis);
- }
- else {
- ScopedTransaction transaction(txn, MODE_S);
- Lock::GlobalRead lk(txn->lockState());
- sleepmillis(millis);
- }
-
- // Interrupt point for testing (e.g. maxTimeMS).
- txn->checkForInterrupt();
+ return appendCommandStatus(result, res.getStatus());
+ }
+};
- return true;
- }
- };
-
- // Testing only, enabled via command-line.
- class CapTrunc : public Command {
- public:
- CapTrunc() : Command( "captrunc" ) {}
- virtual bool slaveOk() const { return false; }
- virtual bool isWriteCommandForConfigServer() const { return false; }
- // No auth needed because it only works when enabled via command line.
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {}
- virtual bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- string coll = cmdObj[ "captrunc" ].valuestrsafe();
- uassert( 13416, "captrunc must specify a collection", !coll.empty() );
- NamespaceString nss( dbname, coll );
- int n = cmdObj.getIntField( "n" );
- bool inc = cmdObj.getBoolField( "inc" ); // inclusive range?
-
- OldClientWriteContext ctx(txn, nss.ns() );
- Collection* collection = ctx.getCollection();
- massert( 13417, "captrunc collection not found or empty", collection);
-
- RecordId end;
- {
- std::unique_ptr<PlanExecutor> exec(InternalPlanner::collectionScan(txn,
- nss.ns(),
- collection,
- InternalPlanner::BACKWARD));
- // We remove 'n' elements so the start is one past that
- for( int i = 0; i < n + 1; ++i ) {
- PlanExecutor::ExecState state = exec->getNext(NULL, &end);
- massert( 13418, "captrunc invalid n", PlanExecutor::ADVANCED == state);
- }
- }
- WriteUnitOfWork wuow(txn);
- collection->temp_cappedTruncateAfter( txn, end, inc );
- wuow.commit();
- return true;
- }
- };
-
- // Testing-only, enabled via command line.
- class EmptyCapped : public Command {
- public:
- EmptyCapped() : Command( "emptycapped" ) {}
- virtual bool slaveOk() const { return false; }
- virtual bool isWriteCommandForConfigServer() const { return false; }
- // No auth needed because it only works when enabled via command line.
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {}
-
- virtual bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- const std::string ns = parseNsCollectionRequired(dbname, cmdObj);
-
- return appendCommandStatus(result, emptyCapped(txn, NamespaceString(ns)));
+/* for diagnostic / testing purposes. Enabled via command line. */
+class CmdSleep : public Command {
+public:
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual bool adminOnly() const {
+ return true;
+ }
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual void help(stringstream& help) const {
+ help << "internal testing command. Makes db block (in a read lock) for 100 seconds\n";
+ help << "w:true write lock. secs:<seconds>";
+ }
+ // No auth needed because it only works when enabled via command line.
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {}
+ CmdSleep() : Command("sleep") {}
+ bool run(OperationContext* txn,
+ const string& ns,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ log() << "test only command sleep invoked" << endl;
+ long long millis = 10 * 1000;
+
+ if (cmdObj["secs"].isNumber() && cmdObj["millis"].isNumber()) {
+ millis = cmdObj["secs"].numberLong() * 1000 + cmdObj["millis"].numberLong();
+ } else if (cmdObj["secs"].isNumber()) {
+ millis = cmdObj["secs"].numberLong() * 1000;
+ } else if (cmdObj["millis"].isNumber()) {
+ millis = cmdObj["millis"].numberLong();
}
- };
+ if (cmdObj.getBoolField("w")) {
+ ScopedTransaction transaction(txn, MODE_X);
+ Lock::GlobalWrite lk(txn->lockState());
+ sleepmillis(millis);
+ } else {
+ ScopedTransaction transaction(txn, MODE_S);
+ Lock::GlobalRead lk(txn->lockState());
+ sleepmillis(millis);
+ }
- // ----------------------------
+ // Interrupt point for testing (e.g. maxTimeMS).
+ txn->checkForInterrupt();
- MONGO_INITIALIZER(RegisterEmptyCappedCmd)(InitializerContext* context) {
- if (Command::testCommandsEnabled) {
- // Leaked intentionally: a Command registers itself when constructed.
- new CapTrunc();
- new CmdSleep();
- new EmptyCapped();
- new GodInsert();
+ return true;
+ }
+};
+
+// Testing only, enabled via command-line.
+class CapTrunc : public Command {
+public:
+ CapTrunc() : Command("captrunc") {}
+ virtual bool slaveOk() const {
+ return false;
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ // No auth needed because it only works when enabled via command line.
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {}
+ virtual bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ string coll = cmdObj["captrunc"].valuestrsafe();
+ uassert(13416, "captrunc must specify a collection", !coll.empty());
+ NamespaceString nss(dbname, coll);
+ int n = cmdObj.getIntField("n");
+ bool inc = cmdObj.getBoolField("inc"); // inclusive range?
+
+ OldClientWriteContext ctx(txn, nss.ns());
+ Collection* collection = ctx.getCollection();
+ massert(13417, "captrunc collection not found or empty", collection);
+
+ RecordId end;
+ {
+ std::unique_ptr<PlanExecutor> exec(InternalPlanner::collectionScan(
+ txn, nss.ns(), collection, InternalPlanner::BACKWARD));
+ // We remove 'n' elements so the start is one past that
+ for (int i = 0; i < n + 1; ++i) {
+ PlanExecutor::ExecState state = exec->getNext(NULL, &end);
+ massert(13418, "captrunc invalid n", PlanExecutor::ADVANCED == state);
+ }
}
- return Status::OK();
+ WriteUnitOfWork wuow(txn);
+ collection->temp_cappedTruncateAfter(txn, end, inc);
+ wuow.commit();
+ return true;
+ }
+};
+
+// Testing-only, enabled via command line.
+class EmptyCapped : public Command {
+public:
+ EmptyCapped() : Command("emptycapped") {}
+ virtual bool slaveOk() const {
+ return false;
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
}
+ // No auth needed because it only works when enabled via command line.
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {}
+
+ virtual bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ const std::string ns = parseNsCollectionRequired(dbname, cmdObj);
+
+ return appendCommandStatus(result, emptyCapped(txn, NamespaceString(ns)));
+ }
+};
+// ----------------------------
+MONGO_INITIALIZER(RegisterEmptyCappedCmd)(InitializerContext* context) {
+ if (Command::testCommandsEnabled) {
+ // Leaked intentionally: a Command registers itself when constructed.
+ new CapTrunc();
+ new CmdSleep();
+ new EmptyCapped();
+ new GodInsert();
+ }
+ return Status::OK();
+}
}
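
All four commands above are registered only when Command::testCommandsEnabled is set
(normally via the enableTestCommands server parameter), so they are absent from a stock
deployment. Sketches of the command documents the handlers parse, with hypothetical
collection names:

    // godinsert: unreplicated direct insert of "obj" into the named collection.
    BSONObj godInsert = BSON("godinsert" << "mycoll" << "obj" << BSON("_id" << 1));
    // sleep: hold a global lock for the given duration; w:true takes the write lock.
    BSONObj sleepCmd = BSON("sleep" << 1 << "w" << false << "secs" << 2);
    // captrunc: truncate the last n records of a capped collection.
    BSONObj capTrunc = BSON("captrunc" << "mycapped" << "n" << 10 << "inc" << false);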
diff --git a/src/mongo/db/commands/top_command.cpp b/src/mongo/db/commands/top_command.cpp
index 3328c286fd0..b716457f311 100644
--- a/src/mongo/db/commands/top_command.cpp
+++ b/src/mongo/db/commands/top_command.cpp
@@ -40,52 +40,56 @@
namespace {
- using namespace mongo;
+using namespace mongo;
- class TopCommand : public Command {
- public:
- TopCommand() : Command("top", true) {}
+class TopCommand : public Command {
+public:
+ TopCommand() : Command("top", true) {}
- virtual bool slaveOk() const { return true; }
- virtual bool adminOnly() const { return true; }
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual void help(std::stringstream& help) const {
- help << "usage by collection, in micros ";
- }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::top);
- out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
- }
- virtual bool run(OperationContext* txn,
- const std::string& db,
- BSONObj& cmdObj,
- int options,
- std::string& errmsg,
- BSONObjBuilder& result) {
- {
- BSONObjBuilder b( result.subobjStart( "totals" ) );
- b.append( "note", "all times in microseconds" );
- Top::get(txn->getClient()->getServiceContext()).append(b);
- b.done();
- }
- return true;
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual bool adminOnly() const {
+ return true;
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual void help(std::stringstream& help) const {
+ help << "usage by collection, in micros ";
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::top);
+ out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
+ }
+ virtual bool run(OperationContext* txn,
+ const std::string& db,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result) {
+ {
+ BSONObjBuilder b(result.subobjStart("totals"));
+ b.append("note", "all times in microseconds");
+ Top::get(txn->getClient()->getServiceContext()).append(b);
+ b.done();
}
+ return true;
+ }
+};
- };
-
- //
- // Command instance.
- // Registers command with the command system and make command
- // available to the client.
- //
-
- MONGO_INITIALIZER(RegisterTopCommand)(InitializerContext* context) {
+//
+// Command instance.
+// Registers the command with the command system and makes it
+// available to clients.
+//
- new TopCommand();
+MONGO_INITIALIZER(RegisterTopCommand)(InitializerContext* context) {
+ new TopCommand();
- return Status::OK();
- }
-} // namespace
+ return Status::OK();
+}
+} // namespace
diff --git a/src/mongo/db/commands/touch.cpp b/src/mongo/db/commands/touch.cpp
index 1b71e55ab53..9beace559f2 100644
--- a/src/mongo/db/commands/touch.cpp
+++ b/src/mongo/db/commands/touch.cpp
@@ -52,66 +52,71 @@
namespace mongo {
- using std::string;
- using std::stringstream;
+using std::string;
+using std::stringstream;
- class TouchCmd : public Command {
- public:
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual bool adminOnly() const { return false; }
- virtual bool slaveOk() const { return true; }
- virtual bool maintenanceMode() const { return true; }
- virtual void help( stringstream& help ) const {
- help << "touch collection\n"
+class TouchCmd : public Command {
+public:
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual bool adminOnly() const {
+ return false;
+ }
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual bool maintenanceMode() const {
+ return true;
+ }
+ virtual void help(stringstream& help) const {
+ help << "touch collection\n"
"Page in all pages of memory containing every extent for the given collection\n"
"{ touch : <collection_name>, [data : true] , [index : true] }\n"
" at least one of data or index must be true; default is both are false\n";
- }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::touch);
- out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
- }
- TouchCmd() : Command("touch") { }
-
- virtual bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- const std::string ns = parseNsCollectionRequired(dbname, cmdObj);
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::touch);
+ out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
+ }
+ TouchCmd() : Command("touch") {}
- const NamespaceString nss(ns);
- if ( ! nss.isNormal() ) {
- errmsg = "bad namespace name";
- return false;
- }
+ virtual bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ const std::string ns = parseNsCollectionRequired(dbname, cmdObj);
- bool touch_indexes( cmdObj["index"].trueValue() );
- bool touch_data( cmdObj["data"].trueValue() );
+ const NamespaceString nss(ns);
+ if (!nss.isNormal()) {
+ errmsg = "bad namespace name";
+ return false;
+ }
- if ( ! (touch_indexes || touch_data) ) {
- errmsg = "must specify at least one of (data:true, index:true)";
- return false;
- }
+ bool touch_indexes(cmdObj["index"].trueValue());
+ bool touch_data(cmdObj["data"].trueValue());
- AutoGetCollectionForRead context(txn, nss);
+ if (!(touch_indexes || touch_data)) {
+ errmsg = "must specify at least one of (data:true, index:true)";
+ return false;
+ }
- Collection* collection = context.getCollection();
- if ( !collection ) {
- errmsg = "collection not found";
- return false;
- }
+ AutoGetCollectionForRead context(txn, nss);
- return appendCommandStatus( result,
- collection->touch( txn,
- touch_data, touch_indexes,
- &result ) );
+ Collection* collection = context.getCollection();
+ if (!collection) {
+ errmsg = "collection not found";
+ return false;
}
- };
- static TouchCmd touchCmd;
+ return appendCommandStatus(result,
+ collection->touch(txn, touch_data, touch_indexes, &result));
+ }
+};
+static TouchCmd touchCmd;
}
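
The help text above doubles as the wire format; a matching command document, assuming a
collection named "logs" in the current database:

    // Page both the data and every index of the collection into memory.
    BSONObj cmd = BSON("touch" << "logs" << "data" << true << "index" << true);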
diff --git a/src/mongo/db/commands/user_management_commands.cpp b/src/mongo/db/commands/user_management_commands.cpp
index 709325a5410..efdf7929bdb 100644
--- a/src/mongo/db/commands/user_management_commands.cpp
+++ b/src/mongo/db/commands/user_management_commands.cpp
@@ -72,2872 +72,2724 @@
namespace mongo {
- namespace str = mongoutils::str;
+namespace str = mongoutils::str;
- using std::endl;
- using std::string;
- using std::stringstream;
- using std::vector;
+using std::endl;
+using std::string;
+using std::stringstream;
+using std::vector;
namespace {
- // Used to obtain mutex that guards modifications to persistent authorization data
- const auto getAuthzDataMutex = ServiceContext::declareDecoration<stdx::timed_mutex>();
+// Used to obtain the mutex that guards modifications to persistent authorization data
+const auto getAuthzDataMutex = ServiceContext::declareDecoration<stdx::timed_mutex>();
- const Seconds authzDataMutexAcquisitionTimeout{5};
+const Seconds authzDataMutexAcquisitionTimeout{5};
- BSONArray roleSetToBSONArray(const unordered_set<RoleName>& roles) {
- BSONArrayBuilder rolesArrayBuilder;
- for (unordered_set<RoleName>::const_iterator it = roles.begin(); it != roles.end(); ++it) {
- const RoleName& role = *it;
- rolesArrayBuilder.append(
- BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME << role.getRole() <<
- AuthorizationManager::ROLE_DB_FIELD_NAME << role.getDB()));
- }
- return rolesArrayBuilder.arr();
+BSONArray roleSetToBSONArray(const unordered_set<RoleName>& roles) {
+ BSONArrayBuilder rolesArrayBuilder;
+ for (unordered_set<RoleName>::const_iterator it = roles.begin(); it != roles.end(); ++it) {
+ const RoleName& role = *it;
+ rolesArrayBuilder.append(BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
+ << role.getRole() << AuthorizationManager::ROLE_DB_FIELD_NAME
+ << role.getDB()));
}
+ return rolesArrayBuilder.arr();
+}
+
+BSONArray rolesVectorToBSONArray(const std::vector<RoleName>& roles) {
+ BSONArrayBuilder rolesArrayBuilder;
+ for (std::vector<RoleName>::const_iterator it = roles.begin(); it != roles.end(); ++it) {
+ const RoleName& role = *it;
+ rolesArrayBuilder.append(BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
+ << role.getRole() << AuthorizationManager::ROLE_DB_FIELD_NAME
+ << role.getDB()));
+ }
+ return rolesArrayBuilder.arr();
+}
- BSONArray rolesVectorToBSONArray(const std::vector<RoleName>& roles) {
- BSONArrayBuilder rolesArrayBuilder;
- for (std::vector<RoleName>::const_iterator it = roles.begin(); it != roles.end(); ++it) {
- const RoleName& role = *it;
- rolesArrayBuilder.append(
- BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME << role.getRole() <<
- AuthorizationManager::ROLE_DB_FIELD_NAME << role.getDB()));
- }
- return rolesArrayBuilder.arr();
- }
-
- Status privilegeVectorToBSONArray(const PrivilegeVector& privileges, BSONArray* result) {
- BSONArrayBuilder arrBuilder;
- for (PrivilegeVector::const_iterator it = privileges.begin();
- it != privileges.end(); ++it) {
- const Privilege& privilege = *it;
-
- ParsedPrivilege parsedPrivilege;
- std::string errmsg;
- if (!ParsedPrivilege::privilegeToParsedPrivilege(privilege,
- &parsedPrivilege,
- &errmsg)) {
- return Status(ErrorCodes::FailedToParse, errmsg);
- }
- if (!parsedPrivilege.isValid(&errmsg)) {
- return Status(ErrorCodes::FailedToParse, errmsg);
- }
- arrBuilder.append(parsedPrivilege.toBSON());
+Status privilegeVectorToBSONArray(const PrivilegeVector& privileges, BSONArray* result) {
+ BSONArrayBuilder arrBuilder;
+ for (PrivilegeVector::const_iterator it = privileges.begin(); it != privileges.end(); ++it) {
+ const Privilege& privilege = *it;
+
+ ParsedPrivilege parsedPrivilege;
+ std::string errmsg;
+ if (!ParsedPrivilege::privilegeToParsedPrivilege(privilege, &parsedPrivilege, &errmsg)) {
+ return Status(ErrorCodes::FailedToParse, errmsg);
}
- *result = arrBuilder.arr();
- return Status::OK();
+ if (!parsedPrivilege.isValid(&errmsg)) {
+ return Status(ErrorCodes::FailedToParse, errmsg);
+ }
+ arrBuilder.append(parsedPrivilege.toBSON());
}
+ *result = arrBuilder.arr();
+ return Status::OK();
+}
- /**
- * Used to get all current roles of the user identified by 'userName'.
- */
- Status getCurrentUserRoles(OperationContext* txn,
- AuthorizationManager* authzManager,
- const UserName& userName,
- unordered_set<RoleName>* roles) {
- User* user;
- authzManager->invalidateUserByName(userName); // Need to make sure cache entry is up to date
- Status status = authzManager->acquireUser(txn, userName, &user);
+/**
+ * Used to get all current roles of the user identified by 'userName'.
+ */
+Status getCurrentUserRoles(OperationContext* txn,
+ AuthorizationManager* authzManager,
+ const UserName& userName,
+ unordered_set<RoleName>* roles) {
+ User* user;
+ authzManager->invalidateUserByName(userName); // Need to make sure cache entry is up to date
+ Status status = authzManager->acquireUser(txn, userName, &user);
+ if (!status.isOK()) {
+ return status;
+ }
+ RoleNameIterator rolesIt = user->getRoles();
+ while (rolesIt.more()) {
+ roles->insert(rolesIt.next());
+ }
+ authzManager->releaseUser(user);
+ return Status::OK();
+}
+
+/**
+ * Checks that every role in "rolesToAdd" exists, that adding each of those roles to "role"
+ * will not result in a cycle in the role graph, and that every role being added comes from
+ * the same database as the role it is being added to (or that the role being added to is
+ * from the "admin" database).
+ */
+Status checkOkayToGrantRolesToRole(const RoleName& role,
+ const std::vector<RoleName> rolesToAdd,
+ AuthorizationManager* authzManager) {
+ for (std::vector<RoleName>::const_iterator it = rolesToAdd.begin(); it != rolesToAdd.end();
+ ++it) {
+ const RoleName& roleToAdd = *it;
+ if (roleToAdd == role) {
+ return Status(ErrorCodes::InvalidRoleModification,
+ mongoutils::str::stream() << "Cannot grant role " << role.getFullName()
+ << " to itself.");
+ }
+
+ if (role.getDB() != "admin" && roleToAdd.getDB() != role.getDB()) {
+ return Status(ErrorCodes::InvalidRoleModification,
+ str::stream()
+ << "Roles on the \'" << role.getDB()
+ << "\' database cannot be granted roles from other databases");
+ }
+
+ BSONObj roleToAddDoc;
+ Status status = authzManager->getRoleDescription(roleToAdd, false, &roleToAddDoc);
+ if (status == ErrorCodes::RoleNotFound) {
+ return Status(ErrorCodes::RoleNotFound,
+ "Cannot grant nonexistent role " + roleToAdd.toString());
+ }
if (!status.isOK()) {
return status;
}
- RoleNameIterator rolesIt = user->getRoles();
- while (rolesIt.more()) {
- roles->insert(rolesIt.next());
+ std::vector<RoleName> indirectRoles;
+ status = auth::parseRoleNamesFromBSONArray(
+ BSONArray(roleToAddDoc["inheritedRoles"].Obj()), role.getDB(), &indirectRoles);
+ if (!status.isOK()) {
+ return status;
}
- authzManager->releaseUser(user);
+
+ if (sequenceContains(indirectRoles, role)) {
+ return Status(ErrorCodes::InvalidRoleModification,
+ mongoutils::str::stream()
+ << "Granting " << roleToAdd.getFullName() << " to "
+ << role.getFullName()
+ << " would introduce a cycle in the role graph.");
+ }
+ }
+ return Status::OK();
+}
+
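// [Editorial sketch, not part of this commit] The cycle test above relies on
// "inheritedRoles" being the transitive closure of a role's grants. Assuming a
// stored role graph in which roleA (db "test") already inherits roleB:
static void exampleCycleCheck(AuthorizationManager* authzManager) {
    std::vector<RoleName> toAdd;
    toAdd.push_back(RoleName("roleA", "test"));
    // Granting roleA to roleB would close a loop, so the helper returns
    // ErrorCodes::InvalidRoleModification.
    Status s = checkOkayToGrantRolesToRole(RoleName("roleB", "test"), toAdd, authzManager);
}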
+/**
+ * Checks that every privilege being granted targets just the database the role is from, or that
+ * the role is from the "admin" db.
+ */
+Status checkOkayToGrantPrivilegesToRole(const RoleName& role, const PrivilegeVector& privileges) {
+ if (role.getDB() == "admin") {
return Status::OK();
}
- /**
- * Checks that every role in "rolesToAdd" exists, that adding each of those roles to "role"
- * will not result in a cycle to the role graph, and that every role being added comes from the
- * same database as the role it is being added to (or that the role being added to is from the
- * "admin" database.
- */
- Status checkOkayToGrantRolesToRole(const RoleName& role,
- const std::vector<RoleName> rolesToAdd,
- AuthorizationManager* authzManager) {
- for (std::vector<RoleName>::const_iterator it = rolesToAdd.begin();
- it != rolesToAdd.end(); ++it) {
- const RoleName& roleToAdd = *it;
- if (roleToAdd == role) {
- return Status(ErrorCodes::InvalidRoleModification,
- mongoutils::str::stream() << "Cannot grant role " <<
- role.getFullName() << " to itself.");
- }
+ for (PrivilegeVector::const_iterator it = privileges.begin(); it != privileges.end(); ++it) {
+ const ResourcePattern& resource = (*it).getResourcePattern();
+ if ((resource.isDatabasePattern() || resource.isExactNamespacePattern()) &&
+ (resource.databaseToMatch() == role.getDB())) {
+ continue;
+ }
- if (role.getDB() != "admin" && roleToAdd.getDB() != role.getDB()) {
- return Status(ErrorCodes::InvalidRoleModification,
- str::stream() << "Roles on the \'" << role.getDB() <<
- "\' database cannot be granted roles from other databases");
- }
+ return Status(ErrorCodes::InvalidRoleModification,
+ str::stream() << "Roles on the \'" << role.getDB()
+ << "\' database cannot be granted privileges that target other "
+ "databases or the cluster");
+ }
- BSONObj roleToAddDoc;
- Status status = authzManager->getRoleDescription(roleToAdd, false, &roleToAddDoc);
- if (status == ErrorCodes::RoleNotFound) {
- return Status(ErrorCodes::RoleNotFound,
- "Cannot grant nonexistent role " + roleToAdd.toString());
- }
- if (!status.isOK()) {
- return status;
- }
- std::vector<RoleName> indirectRoles;
- status = auth::parseRoleNamesFromBSONArray(
- BSONArray(roleToAddDoc["inheritedRoles"].Obj()),
- role.getDB(),
- &indirectRoles);
- if (!status.isOK()) {
- return status;
- }
+ return Status::OK();
+}
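// [Editorial sketch, not part of this commit] Illustration of the scoping rule
// above: a role on "test" may only hold privileges targeting "test", so a
// privilege on another database is rejected. The Privilege/ResourcePattern
// constructors used here are assumed from the auth library's public headers.
static void examplePrivilegeScope() {
    PrivilegeVector privs;
    privs.push_back(Privilege(ResourcePattern::forDatabaseName("other"), ActionSet()));
    // Returns ErrorCodes::InvalidRoleModification for this non-admin role.
    Status s = checkOkayToGrantPrivilegesToRole(RoleName("myRole", "test"), privs);
}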
- if (sequenceContains(indirectRoles, role)) {
- return Status(ErrorCodes::InvalidRoleModification,
- mongoutils::str::stream() << "Granting " <<
- roleToAdd.getFullName() << " to " << role.getFullName()
- << " would introduce a cycle in the role graph.");
- }
- }
+void appendBSONObjToBSONArrayBuilder(BSONArrayBuilder* array, const BSONObj& obj) {
+ array->append(obj);
+}
+
+/**
+ * Finds all documents matching "query" in "collectionName". For each document returned,
+ * calls the function resultProcessor on it.
+ *
+ * Should only be called on collections with authorization documents in them
+ * (i.e. admin.system.users and admin.system.roles).
+ */
+Status queryAuthzDocument(OperationContext* txn,
+ const NamespaceString& collectionName,
+ const BSONObj& query,
+ const BSONObj& projection,
+ const stdx::function<void(const BSONObj&)>& resultProcessor) {
+ try {
+ DBDirectClient client(txn);
+ client.query(resultProcessor, collectionName.ns(), query, &projection);
return Status::OK();
+ } catch (const DBException& e) {
+ return e.toStatus();
}
+}
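// [Editorial sketch, not part of this commit] Typical use of the query helper
// above; stdx::function accepts a lambda, so results can be collected locally:
static void exampleCollectUsers(OperationContext* txn) {
    std::vector<BSONObj> users;
    Status s = queryAuthzDocument(
        txn,
        AuthorizationManager::usersCollectionNamespace,
        BSONObj(),  // match every document
        BSONObj(),  // no projection
        [&users](const BSONObj& doc) { users.push_back(doc.getOwned()); });
}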
- /**
- * Checks that every privilege being granted targets just the database the role is from, or that
- * the role is from the "admin" db.
- */
- Status checkOkayToGrantPrivilegesToRole(const RoleName& role,
- const PrivilegeVector& privileges) {
- if (role.getDB() == "admin") {
+/**
+ * Inserts "document" into "collectionName".
+ * If there is a duplicate key error, returns a Status with code DuplicateKey.
+ *
+ * Should only be called on collections with authorization documents in them
+ * (i.e. admin.system.users and admin.system.roles).
+ */
+Status insertAuthzDocument(OperationContext* txn,
+ const NamespaceString& collectionName,
+ const BSONObj& document,
+ const BSONObj& writeConcern) {
+ try {
+ DBDirectClient client(txn);
+ client.insert(collectionName, document);
+
+ // Handle write concern
+ BSONObjBuilder gleBuilder;
+ gleBuilder.append("getLastError", 1);
+ gleBuilder.appendElements(writeConcern);
+ BSONObj res;
+ client.runCommand("admin", gleBuilder.done(), res);
+ string errstr = client.getLastErrorString(res);
+ if (errstr.empty()) {
return Status::OK();
}
-
- for (PrivilegeVector::const_iterator it = privileges.begin();
- it != privileges.end(); ++it) {
- const ResourcePattern& resource = (*it).getResourcePattern();
- if ((resource.isDatabasePattern() || resource.isExactNamespacePattern()) &&
- (resource.databaseToMatch() == role.getDB())) {
- continue;
- }
-
- return Status(ErrorCodes::InvalidRoleModification,
- str::stream() << "Roles on the \'" << role.getDB() <<
- "\' database cannot be granted privileges that target other "
- "databases or the cluster");
+ if (res.hasField("code") && res["code"].Int() == ASSERT_ID_DUPKEY) {
+ return Status(ErrorCodes::DuplicateKey, errstr);
}
-
- return Status::OK();
+ return Status(ErrorCodes::UnknownError, errstr);
+ } catch (const DBException& e) {
+ return e.toStatus();
}
+}
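// [Editorial sketch, not part of this commit] Callers of the insert helper
// above distinguish _id collisions from other failures by Status code:
static void exampleInsertUserDoc(OperationContext* txn, const BSONObj& userDoc) {
    Status s = insertAuthzDocument(
        txn, AuthorizationManager::usersCollectionNamespace, userDoc, BSONObj());
    if (s.code() == ErrorCodes::DuplicateKey) {
        // A document with the same "<db>.<user>" _id already exists.
    }
}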
- void appendBSONObjToBSONArrayBuilder(BSONArrayBuilder* array, const BSONObj& obj) {
- array->append(obj);
+/**
+ * Updates documents matching "query" according to "updatePattern" in "collectionName".
+ *
+ * Should only be called on collections with authorization documents in them
+ * (i.e. admin.system.users and admin.system.roles).
+ */
+Status updateAuthzDocuments(OperationContext* txn,
+ const NamespaceString& collectionName,
+ const BSONObj& query,
+ const BSONObj& updatePattern,
+ bool upsert,
+ bool multi,
+ const BSONObj& writeConcern,
+ int* nMatched) {
+ try {
+ DBDirectClient client(txn);
+ client.update(collectionName, query, updatePattern, upsert, multi);
+
+ // Handle write concern
+ BSONObjBuilder gleBuilder;
+ gleBuilder.append("getLastError", 1);
+ gleBuilder.appendElements(writeConcern);
+ BSONObj res;
+ client.runCommand("admin", gleBuilder.done(), res);
+ string errstr = client.getLastErrorString(res);
+ if (errstr.empty()) {
+ *nMatched = res["n"].numberInt();
+ return Status::OK();
+ }
+ return Status(ErrorCodes::UnknownError, errstr);
+ } catch (const DBException& e) {
+ return e.toStatus();
}
+}
- /**
- * Finds all documents matching "query" in "collectionName". For each document returned,
- * calls the function resultProcessor on it.
- * Should only be called on collections with authorization documents in them
- * (ie admin.system.users and admin.system.roles).
- */
- Status queryAuthzDocument(OperationContext* txn,
+/**
+ * Updates one document matching "query" according to "updatePattern" in "collectionName".
+ *
+ * If "upsert" is true and no document matches "query", inserts one using "query" as a
+ * template.
+ * If "upsert" is false and no document matches "query", returns a Status with the code
+ * NoMatchingDocument. The Status message in that case is not very descriptive and should
+ * not be displayed to the end user.
+ *
+ * Should only be called on collections with authorization documents in them
+ * (i.e. admin.system.users and admin.system.roles).
+ */
+Status updateOneAuthzDocument(OperationContext* txn,
const NamespaceString& collectionName,
const BSONObj& query,
- const BSONObj& projection,
- const stdx::function<void(const BSONObj&)>& resultProcessor) {
- try {
- DBDirectClient client(txn);
- client.query(resultProcessor, collectionName.ns(), query, &projection);
- return Status::OK();
- } catch (const DBException& e) {
- return e.toStatus();
- }
+ const BSONObj& updatePattern,
+ bool upsert,
+ const BSONObj& writeConcern) {
+ int nMatched;
+ Status status = updateAuthzDocuments(
+ txn, collectionName, query, updatePattern, upsert, false, writeConcern, &nMatched);
+ if (!status.isOK()) {
+ return status;
}
+ dassert(nMatched == 1 || nMatched == 0);
+ if (nMatched == 0) {
+ return Status(ErrorCodes::NoMatchingDocument, "No document found");
+ }
+ return Status::OK();
+}
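// [Editorial sketch, not part of this commit] With upsert=false the helper
// above maps "nothing matched" to NoMatchingDocument, which the wrappers
// updateRoleDocument/updatePrivilegeDocument below translate into
// RoleNotFound/UserNotFound:
static void exampleUpdateMiss(OperationContext* txn) {
    Status s = updateOneAuthzDocument(txn,
                                      AuthorizationManager::usersCollectionNamespace,
                                      BSON("user" << "nosuchuser" << "db" << "test"),
                                      BSON("$set" << BSON("customData" << BSONObj())),
                                      false,       // no upsert
                                      BSONObj());  // default write concern
    // s.code() == ErrorCodes::NoMatchingDocument when no user matches.
}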
- /**
- * Inserts "document" into "collectionName".
- * If there is a duplicate key error, returns a Status with code DuplicateKey.
- *
- * Should only be called on collections with authorization documents in them
- * (ie admin.system.users and admin.system.roles).
- */
- Status insertAuthzDocument(OperationContext* txn,
- const NamespaceString& collectionName,
- const BSONObj& document,
- const BSONObj& writeConcern) {
- try {
- DBDirectClient client(txn);
- client.insert(collectionName, document);
-
- // Handle write concern
- BSONObjBuilder gleBuilder;
- gleBuilder.append("getLastError", 1);
- gleBuilder.appendElements(writeConcern);
- BSONObj res;
- client.runCommand("admin", gleBuilder.done(), res);
- string errstr = client.getLastErrorString(res);
- if (errstr.empty()) {
- return Status::OK();
- }
- if (res.hasField("code") && res["code"].Int() == ASSERT_ID_DUPKEY) {
- return Status(ErrorCodes::DuplicateKey, errstr);
- }
- return Status(ErrorCodes::UnknownError, errstr);
- } catch (const DBException& e) {
- return e.toStatus();
+/**
+ * Removes all documents matching "query" from "collectionName".
+ *
+ * Should only be called on collections with authorization documents in them
+ * (i.e. admin.system.users and admin.system.roles).
+ */
+Status removeAuthzDocuments(OperationContext* txn,
+ const NamespaceString& collectionName,
+ const BSONObj& query,
+ const BSONObj& writeConcern,
+ int* numRemoved) {
+ try {
+ DBDirectClient client(txn);
+ client.remove(collectionName, query);
+
+ // Handle write concern
+ BSONObjBuilder gleBuilder;
+ gleBuilder.append("getLastError", 1);
+ gleBuilder.appendElements(writeConcern);
+ BSONObj res;
+ client.runCommand("admin", gleBuilder.done(), res);
+ string errstr = client.getLastErrorString(res);
+ if (errstr.empty()) {
+ *numRemoved = res["n"].numberInt();
+ return Status::OK();
}
+ return Status(ErrorCodes::UnknownError, errstr);
+ } catch (const DBException& e) {
+ return e.toStatus();
}
+}
- /**
- * Updates documents matching "query" according to "updatePattern" in "collectionName".
- *
- * Should only be called on collections with authorization documents in them
- * (ie admin.system.users and admin.system.roles).
- */
- Status updateAuthzDocuments(OperationContext* txn,
- const NamespaceString& collectionName,
- const BSONObj& query,
- const BSONObj& updatePattern,
- bool upsert,
- bool multi,
- const BSONObj& writeConcern,
- int* nMatched) {
- try {
- DBDirectClient client(txn);
- client.update(collectionName, query, updatePattern, upsert, multi);
-
- // Handle write concern
- BSONObjBuilder gleBuilder;
- gleBuilder.append("getLastError", 1);
- gleBuilder.appendElements(writeConcern);
- BSONObj res;
- client.runCommand("admin", gleBuilder.done(), res);
- string errstr = client.getLastErrorString(res);
- if (errstr.empty()) {
- *nMatched = res["n"].numberInt();
- return Status::OK();
- }
- return Status(ErrorCodes::UnknownError, errstr);
- } catch (const DBException& e) {
- return e.toStatus();
- }
+/**
+ * Creates the given role object in the given database.
+ * 'writeConcern' contains the arguments to be passed to getLastError to block for
+ * successful completion of the write.
+ */
+Status insertRoleDocument(OperationContext* txn,
+ const BSONObj& roleObj,
+ const BSONObj& writeConcern) {
+ Status status = insertAuthzDocument(
+ txn, AuthorizationManager::rolesCollectionNamespace, roleObj, writeConcern);
+ if (status.isOK()) {
+ return status;
}
+ if (status.code() == ErrorCodes::DuplicateKey) {
+ std::string name = roleObj[AuthorizationManager::ROLE_NAME_FIELD_NAME].String();
+ std::string source = roleObj[AuthorizationManager::ROLE_DB_FIELD_NAME].String();
+ return Status(ErrorCodes::DuplicateKey,
+ str::stream() << "Role \"" << name << "@" << source << "\" already exists");
+ }
+ if (status.code() == ErrorCodes::UnknownError) {
+ return Status(ErrorCodes::RoleModificationFailed, status.reason());
+ }
+ return status;
+}
- /**
- * Update one document matching "query" according to "updatePattern" in "collectionName".
- *
- * If "upsert" is true and no document matches "query", inserts one using "query" as a
- * template.
- * If "upsert" is false and no document matches "query", return a Status with the code
- * NoMatchingDocument. The Status message in that case is not very descriptive and should
- * not be displayed to the end user.
- *
- * Should only be called on collections with authorization documents in them
- * (ie admin.system.users and admin.system.roles).
- */
- Status updateOneAuthzDocument(
- OperationContext* txn,
- const NamespaceString& collectionName,
- const BSONObj& query,
- const BSONObj& updatePattern,
- bool upsert,
- const BSONObj& writeConcern) {
- int nMatched;
- Status status = updateAuthzDocuments(txn,
- collectionName,
- query,
- updatePattern,
- upsert,
- false,
- writeConcern,
- &nMatched);
- if (!status.isOK()) {
- return status;
- }
- dassert(nMatched == 1 || nMatched == 0);
- if (nMatched == 0) {
- return Status(ErrorCodes::NoMatchingDocument, "No document found");
- }
- return Status::OK();
+/**
+ * Updates the given role object with the given update modifier.
+ * 'writeConcern' contains the arguments to be passed to getLastError to block for
+ * successful completion of the write.
+ */
+Status updateRoleDocument(OperationContext* txn,
+ const RoleName& role,
+ const BSONObj& updateObj,
+ const BSONObj& writeConcern) {
+ Status status = updateOneAuthzDocument(
+ txn,
+ AuthorizationManager::rolesCollectionNamespace,
+ BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
+ << role.getRole() << AuthorizationManager::ROLE_DB_FIELD_NAME << role.getDB()),
+ updateObj,
+ false,
+ writeConcern);
+ if (status.isOK()) {
+ return status;
+ }
+ if (status.code() == ErrorCodes::NoMatchingDocument) {
+ return Status(ErrorCodes::RoleNotFound,
+ str::stream() << "Role " << role.getFullName() << " not found");
}
+ if (status.code() == ErrorCodes::UnknownError) {
+ return Status(ErrorCodes::RoleModificationFailed, status.reason());
+ }
+ return status;
+}
- /**
- * Removes all documents matching "query" from "collectionName".
- *
- * Should only be called on collections with authorization documents in them
- * (ie admin.system.users and admin.system.roles).
- */
- Status removeAuthzDocuments(
- OperationContext* txn,
- const NamespaceString& collectionName,
- const BSONObj& query,
- const BSONObj& writeConcern,
- int* numRemoved) {
- try {
- DBDirectClient client(txn);
- client.remove(collectionName, query);
-
- // Handle write concern
- BSONObjBuilder gleBuilder;
- gleBuilder.append("getLastError", 1);
- gleBuilder.appendElements(writeConcern);
- BSONObj res;
- client.runCommand("admin", gleBuilder.done(), res);
- string errstr = client.getLastErrorString(res);
- if (errstr.empty()) {
- *numRemoved = res["n"].numberInt();
- return Status::OK();
- }
- return Status(ErrorCodes::UnknownError, errstr);
- } catch (const DBException& e) {
- return e.toStatus();
- }
+/**
+ * Removes roles matching the given query.
+ * Writes into *numRemoved the number of role documents that were removed.
+ * 'writeConcern' contains the arguments to be passed to getLastError to block for
+ * successful completion of the write.
+ */
+Status removeRoleDocuments(OperationContext* txn,
+ const BSONObj& query,
+ const BSONObj& writeConcern,
+ int* numRemoved) {
+ Status status = removeAuthzDocuments(
+ txn, AuthorizationManager::rolesCollectionNamespace, query, writeConcern, numRemoved);
+ if (status.code() == ErrorCodes::UnknownError) {
+ return Status(ErrorCodes::RoleModificationFailed, status.reason());
}
+ return status;
+}
- /**
- * Creates the given role object in the given database.
- * 'writeConcern' contains the arguments to be passed to getLastError to block for
- * successful completion of the write.
- */
- Status insertRoleDocument(OperationContext* txn,
- const BSONObj& roleObj,
- const BSONObj& writeConcern) {
- Status status = insertAuthzDocument(txn,
- AuthorizationManager::rolesCollectionNamespace,
- roleObj,
- writeConcern);
- if (status.isOK()) {
- return status;
- }
- if (status.code() == ErrorCodes::DuplicateKey) {
- std::string name = roleObj[AuthorizationManager::ROLE_NAME_FIELD_NAME].String();
- std::string source = roleObj[AuthorizationManager::ROLE_DB_FIELD_NAME].String();
- return Status(ErrorCodes::DuplicateKey,
- str::stream() << "Role \"" << name << "@" << source
- << "\" already exists");
- }
- if (status.code() == ErrorCodes::UnknownError) {
- return Status(ErrorCodes::RoleModificationFailed, status.reason());
- }
+/**
+ * Creates the given user object in the given database.
+ * 'writeConcern' contains the arguments to be passed to getLastError to block for
+ * successful completion of the write.
+ */
+Status insertPrivilegeDocument(OperationContext* txn,
+ const BSONObj& userObj,
+ const BSONObj& writeConcern) {
+ Status status = insertAuthzDocument(
+ txn, AuthorizationManager::usersCollectionNamespace, userObj, writeConcern);
+ if (status.isOK()) {
return status;
}
+ if (status.code() == ErrorCodes::DuplicateKey) {
+ std::string name = userObj[AuthorizationManager::USER_NAME_FIELD_NAME].String();
+ std::string source = userObj[AuthorizationManager::USER_DB_FIELD_NAME].String();
+ return Status(ErrorCodes::DuplicateKey,
+ str::stream() << "User \"" << name << "@" << source << "\" already exists");
+ }
+ if (status.code() == ErrorCodes::UnknownError) {
+ return Status(ErrorCodes::UserModificationFailed, status.reason());
+ }
+ return status;
+}
- /**
- * Updates the given role object with the given update modifier.
- * 'writeConcern' contains the arguments to be passed to getLastError to block for
- * successful completion of the write.
- */
- Status updateRoleDocument(OperationContext* txn,
- const RoleName& role,
+/**
+ * Updates the given user object with the given update modifier.
+ * 'writeConcern' contains the arguments to be passed to getLastError to block for
+ * successful completion of the write.
+ */
+Status updatePrivilegeDocument(OperationContext* txn,
+ const UserName& user,
const BSONObj& updateObj,
const BSONObj& writeConcern) {
- Status status = updateOneAuthzDocument(
- txn,
- AuthorizationManager::rolesCollectionNamespace,
- BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME << role.getRole() <<
- AuthorizationManager::ROLE_DB_FIELD_NAME << role.getDB()),
- updateObj,
- false,
- writeConcern);
- if (status.isOK()) {
- return status;
- }
- if (status.code() == ErrorCodes::NoMatchingDocument) {
- return Status(ErrorCodes::RoleNotFound,
- str::stream() << "Role " << role.getFullName()
- << " not found");
- }
- if (status.code() == ErrorCodes::UnknownError) {
- return Status(ErrorCodes::RoleModificationFailed, status.reason());
- }
+ Status status = updateOneAuthzDocument(
+ txn,
+ AuthorizationManager::usersCollectionNamespace,
+ BSON(AuthorizationManager::USER_NAME_FIELD_NAME
+ << user.getUser() << AuthorizationManager::USER_DB_FIELD_NAME << user.getDB()),
+ updateObj,
+ false,
+ writeConcern);
+ if (status.isOK()) {
return status;
}
-
- /**
- * Removes roles matching the given query.
- * Writes into *numRemoved the number of role documents that were modified.
- * 'writeConcern' contains the arguments to be passed to getLastError to block for
- * successful completion of the write.
- */
- Status removeRoleDocuments(OperationContext* txn,
- const BSONObj& query,
- const BSONObj& writeConcern,
- int* numRemoved) {
- Status status = removeAuthzDocuments(txn,
- AuthorizationManager::rolesCollectionNamespace,
- query,
- writeConcern,
- numRemoved);
- if (status.code() == ErrorCodes::UnknownError) {
- return Status(ErrorCodes::RoleModificationFailed, status.reason());
- }
- return status;
+ if (status.code() == ErrorCodes::NoMatchingDocument) {
+ return Status(ErrorCodes::UserNotFound,
+ str::stream() << "User " << user.getFullName() << " not found");
}
+ if (status.code() == ErrorCodes::UnknownError) {
+ return Status(ErrorCodes::UserModificationFailed, status.reason());
+ }
+ return status;
+}
- /**
- * Creates the given user object in the given database.
- * 'writeConcern' contains the arguments to be passed to getLastError to block for
- * successful completion of the write.
- */
- Status insertPrivilegeDocument(OperationContext* txn,
- const BSONObj& userObj,
- const BSONObj& writeConcern) {
- Status status = insertAuthzDocument(txn,
- AuthorizationManager::usersCollectionNamespace,
- userObj,
- writeConcern);
- if (status.isOK()) {
- return status;
- }
- if (status.code() == ErrorCodes::DuplicateKey) {
- std::string name = userObj[AuthorizationManager::USER_NAME_FIELD_NAME].String();
- std::string source = userObj[AuthorizationManager::USER_DB_FIELD_NAME].String();
- return Status(ErrorCodes::DuplicateKey,
- str::stream() << "User \"" << name << "@" << source
- << "\" already exists");
- }
- if (status.code() == ErrorCodes::UnknownError) {
- return Status(ErrorCodes::UserModificationFailed, status.reason());
- }
- return status;
+/**
+ * Removes users for the given database matching the given query.
+ * Writes into *numRemoved the number of user documents that were removed.
+ * 'writeConcern' contains the arguments to be passed to getLastError to block for
+ * successful completion of the write.
+ */
+Status removePrivilegeDocuments(OperationContext* txn,
+ const BSONObj& query,
+ const BSONObj& writeConcern,
+ int* numRemoved) {
+ Status status = removeAuthzDocuments(
+ txn, AuthorizationManager::usersCollectionNamespace, query, writeConcern, numRemoved);
+ if (status.code() == ErrorCodes::UnknownError) {
+ return Status(ErrorCodes::UserModificationFailed, status.reason());
}
+ return status;
+}
- /**
- * Updates the given user object with the given update modifier.
- * 'writeConcern' contains the arguments to be passed to getLastError to block for
- * successful completion of the write.
- */
- Status updatePrivilegeDocument(OperationContext* txn,
- const UserName& user,
- const BSONObj& updateObj,
- const BSONObj& writeConcern) {
- Status status = updateOneAuthzDocument(
- txn,
- AuthorizationManager::usersCollectionNamespace,
- BSON(AuthorizationManager::USER_NAME_FIELD_NAME << user.getUser() <<
- AuthorizationManager::USER_DB_FIELD_NAME << user.getDB()),
- updateObj,
- false,
- writeConcern);
- if (status.isOK()) {
- return status;
- }
- if (status.code() == ErrorCodes::NoMatchingDocument) {
- return Status(ErrorCodes::UserNotFound,
- str::stream() << "User " << user.getFullName()
- << " not found");
- }
- if (status.code() == ErrorCodes::UnknownError) {
- return Status(ErrorCodes::UserModificationFailed, status.reason());
- }
- return status;
+/**
+ * Updates the auth schema version document to reflect the current state of the system.
+ * 'foundSchemaVersion' is the authSchemaVersion to update with.
+ */
+Status writeAuthSchemaVersionIfNeeded(OperationContext* txn,
+ AuthorizationManager* authzManager,
+ int foundSchemaVersion) {
+ Status status = updateOneAuthzDocument(
+ txn,
+ AuthorizationManager::versionCollectionNamespace,
+ AuthorizationManager::versionDocumentQuery,
+ BSON("$set" << BSON(AuthorizationManager::schemaVersionFieldName << foundSchemaVersion)),
+ true, // upsert
+ BSONObj()); // write concern
+ if (status == ErrorCodes::NoMatchingDocument) { // SERVER-11492
+ status = Status::OK();
}
- /**
- * Removes users for the given database matching the given query.
- * Writes into *numRemoved the number of user documents that were modified.
- * 'writeConcern' contains the arguments to be passed to getLastError to block for
- * successful completion of the write.
- */
- Status removePrivilegeDocuments(OperationContext* txn,
- const BSONObj& query,
- const BSONObj& writeConcern,
- int* numRemoved) {
- Status status = removeAuthzDocuments(txn,
- AuthorizationManager::usersCollectionNamespace,
- query,
- writeConcern,
- numRemoved);
- if (status.code() == ErrorCodes::UnknownError) {
- return Status(ErrorCodes::UserModificationFailed, status.reason());
- }
+ return status;
+}
+
+/**
+ * Returns Status::OK() if the current Auth schema version is at least the auth schema version
+ * for the MongoDB 2.6 and 3.0 MongoDB-CR/SCRAM mixed auth mode.
+ * Returns an error otherwise.
+ */
+Status requireAuthSchemaVersion26Final(OperationContext* txn, AuthorizationManager* authzManager) {
+ int foundSchemaVersion;
+ Status status = authzManager->getAuthorizationVersion(txn, &foundSchemaVersion);
+ if (!status.isOK()) {
return status;
}
- /**
- * Updates the auth schema version document to reflect the current state of the system.
- * 'foundSchemaVersion' is the authSchemaVersion to update with.
- */
- Status writeAuthSchemaVersionIfNeeded(OperationContext* txn,
- AuthorizationManager* authzManager,
- int foundSchemaVersion) {
- Status status = updateOneAuthzDocument(txn,
- AuthorizationManager::versionCollectionNamespace,
- AuthorizationManager::versionDocumentQuery,
- BSON("$set" << BSON(AuthorizationManager::schemaVersionFieldName
- << foundSchemaVersion)),
- true, // upsert
- BSONObj()); // write concern
- if (status == ErrorCodes::NoMatchingDocument) { // SERVER-11492
- status = Status::OK();
- }
+ if (foundSchemaVersion < AuthorizationManager::schemaVersion26Final) {
+ return Status(ErrorCodes::AuthSchemaIncompatible,
+ str::stream()
+ << "User and role management commands require auth data to have "
+ << "at least schema version "
+ << AuthorizationManager::schemaVersion26Final << " but found "
+ << foundSchemaVersion);
+ }
+ return writeAuthSchemaVersionIfNeeded(txn, authzManager, foundSchemaVersion);
+}
+
+/**
+ * Returns Status::OK() if the current Auth schema version is at least the auth schema version
+ * for MongoDB 2.6 during the upgrade process.
+ * Returns an error otherwise.
+ */
+Status requireAuthSchemaVersion26UpgradeOrFinal(OperationContext* txn,
+ AuthorizationManager* authzManager) {
+ int foundSchemaVersion;
+ Status status = authzManager->getAuthorizationVersion(txn, &foundSchemaVersion);
+ if (!status.isOK()) {
return status;
}
- /**
- * Returns Status::OK() if the current Auth schema version is at least the auth schema version
- * for the MongoDB 2.6 and 3.0 MongoDB-CR/SCRAM mixed auth mode.
- * Returns an error otherwise.
- */
- Status requireAuthSchemaVersion26Final(OperationContext* txn,
- AuthorizationManager* authzManager) {
- int foundSchemaVersion;
- Status status = authzManager->getAuthorizationVersion(txn, &foundSchemaVersion);
- if (!status.isOK()) {
- return status;
- }
-
- if (foundSchemaVersion < AuthorizationManager::schemaVersion26Final) {
- return Status(
- ErrorCodes::AuthSchemaIncompatible,
- str::stream() << "User and role management commands require auth data to have "
- << "at least schema version "
- << AuthorizationManager::schemaVersion26Final
- << " but found " << foundSchemaVersion);
- }
- return writeAuthSchemaVersionIfNeeded(txn, authzManager, foundSchemaVersion);
+ if (foundSchemaVersion < AuthorizationManager::schemaVersion26Upgrade) {
+ return Status(ErrorCodes::AuthSchemaIncompatible,
+ str::stream() << "The usersInfo and rolesInfo commands require auth data to "
+ << "have at least schema version "
+ << AuthorizationManager::schemaVersion26Upgrade << " but found "
+ << foundSchemaVersion);
}
+ return Status::OK();
+}
- /**
- * Returns Status::OK() if the current Auth schema version is at least the auth schema version
- * for MongoDB 2.6 during the upgrade process.
- * Returns an error otherwise.
- */
- Status requireAuthSchemaVersion26UpgradeOrFinal(OperationContext* txn,
- AuthorizationManager* authzManager) {
- int foundSchemaVersion;
- Status status = authzManager->getAuthorizationVersion(txn, &foundSchemaVersion);
- if (!status.isOK()) {
- return status;
- }
+} // namespace
- if (foundSchemaVersion < AuthorizationManager::schemaVersion26Upgrade) {
- return Status(
- ErrorCodes::AuthSchemaIncompatible,
- str::stream() << "The usersInfo and rolesInfo commands require auth data to "
- << "have at least schema version "
- << AuthorizationManager::schemaVersion26Upgrade
- << " but found " << foundSchemaVersion);
- }
- return Status::OK();
- }
-} // namespace
+class CmdCreateUser : public Command {
+public:
+ CmdCreateUser() : Command("createUser") {}
+ virtual bool slaveOk() const {
+ return false;
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return true;
+ }
- class CmdCreateUser : public Command {
- public:
+ virtual void help(stringstream& ss) const {
+ ss << "Adds a user to the system" << endl;
+ }
- CmdCreateUser() : Command("createUser") {}
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ return auth::checkAuthForCreateUserCommand(client, dbname, cmdObj);
+ }
- virtual bool slaveOk() const {
- return false;
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ auth::CreateOrUpdateUserArgs args;
+ Status status = auth::parseCreateOrUpdateUserCommands(cmdObj, "createUser", dbname, &args);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
}
- virtual bool isWriteCommandForConfigServer() const { return true; }
+ if (args.userName.getDB() == "local") {
+ return appendCommandStatus(
+ result, Status(ErrorCodes::BadValue, "Cannot create users in the local database"));
+ }
- virtual void help(stringstream& ss) const {
- ss << "Adds a user to the system" << endl;
+ if (!args.hasHashedPassword && args.userName.getDB() != "$external") {
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::BadValue,
+ "Must provide a 'pwd' field for all user documents, except those"
+ " with '$external' as the user's source db"));
}
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- return auth::checkAuthForCreateUserCommand(client, dbname, cmdObj);
+ if ((args.hasHashedPassword) && args.userName.getDB() == "$external") {
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::BadValue,
+ "Cannot set the password for users defined on the '$external' "
+ "database"));
}
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
- auth::CreateOrUpdateUserArgs args;
- Status status = auth::parseCreateOrUpdateUserCommands(cmdObj,
- "createUser",
- dbname,
- &args);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+ if (!args.hasRoles) {
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::BadValue, "\"createUser\" command requires a \"roles\" array"));
+ }
- if (args.userName.getDB() == "local") {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::BadValue, "Cannot create users in the local database"));
- }
+#ifdef MONGO_CONFIG_SSL
+ if (args.userName.getDB() == "$external" && getSSLManager() &&
+ getSSLManager()->getSSLConfiguration().serverSubjectName == args.userName.getUser()) {
+ return appendCommandStatus(result,
+ Status(ErrorCodes::BadValue,
+ "Cannot create an x.509 user with the same "
+ "subjectname as the server"));
+ }
+#endif
- if (!args.hasHashedPassword && args.userName.getDB() != "$external") {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::BadValue,
- "Must provide a 'pwd' field for all user documents, except those"
- " with '$external' as the user's source db"));
- }
+ BSONObjBuilder userObjBuilder;
+ userObjBuilder.append(
+ "_id", str::stream() << args.userName.getDB() << "." << args.userName.getUser());
+ userObjBuilder.append(AuthorizationManager::USER_NAME_FIELD_NAME, args.userName.getUser());
+ userObjBuilder.append(AuthorizationManager::USER_DB_FIELD_NAME, args.userName.getDB());
- if ((args.hasHashedPassword) &&
- args.userName.getDB() == "$external") {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::BadValue,
- "Cannot set the password for users defined on the '$external' "
- "database"));
- }
+ ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
+ int authzVersion;
+ status = authzManager->getAuthorizationVersion(txn, &authzVersion);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
- if (!args.hasRoles) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::BadValue,
- "\"createUser\" command requires a \"roles\" array"));
+ BSONObjBuilder credentialsBuilder(userObjBuilder.subobjStart("credentials"));
+ if (!args.hasHashedPassword) {
+ // Must be an external user
+ credentialsBuilder.append("external", true);
+ } else {
+ // Add SCRAM credentials for appropriate authSchemaVersions.
+ if (authzVersion > AuthorizationManager::schemaVersion26Final) {
+ BSONObj scramCred = scram::generateCredentials(
+ args.hashedPassword, saslGlobalParams.scramIterationCount);
+ credentialsBuilder.append("SCRAM-SHA-1", scramCred);
+ } else { // Otherwise default to MONGODB-CR.
+ credentialsBuilder.append("MONGODB-CR", args.hashedPassword);
}
+ }
+ credentialsBuilder.done();
-#ifdef MONGO_CONFIG_SSL
- if (args.userName.getDB() == "$external" &&
- getSSLManager() &&
- getSSLManager()->getSSLConfiguration()
- .serverSubjectName == args.userName.getUser()) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::BadValue,
- "Cannot create an x.509 user with the same "
- "subjectname as the server"));
- }
-#endif
+ if (args.hasCustomData) {
+ userObjBuilder.append("customData", args.customData);
+ }
+ userObjBuilder.append("roles", rolesVectorToBSONArray(args.roles));
- BSONObjBuilder userObjBuilder;
- userObjBuilder.append("_id",
- str::stream() << args.userName.getDB() << "." <<
- args.userName.getUser());
- userObjBuilder.append(AuthorizationManager::USER_NAME_FIELD_NAME,
- args.userName.getUser());
- userObjBuilder.append(AuthorizationManager::USER_DB_FIELD_NAME,
- args.userName.getDB());
-
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
- AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- int authzVersion;
- status = authzManager->getAuthorizationVersion(txn, &authzVersion);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+ BSONObj userObj = userObjBuilder.obj();
+ V2UserDocumentParser parser;
+ status = parser.checkValidUserDocument(userObj);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
- BSONObjBuilder credentialsBuilder(userObjBuilder.subobjStart("credentials"));
- if (!args.hasHashedPassword) {
- // Must be an external user
- credentialsBuilder.append("external", true);
- }
- else {
- // Add SCRAM credentials for appropriate authSchemaVersions.
- if (authzVersion > AuthorizationManager::schemaVersion26Final) {
- BSONObj scramCred = scram::generateCredentials(
- args.hashedPassword,
- saslGlobalParams.scramIterationCount);
- credentialsBuilder.append("SCRAM-SHA-1", scramCred);
- }
- else { // Otherwise default to MONGODB-CR.
- credentialsBuilder.append("MONGODB-CR", args.hashedPassword);
- }
- }
- credentialsBuilder.done();
+ stdx::unique_lock<stdx::timed_mutex> lk(getAuthzDataMutex(serviceContext),
+ authzDataMutexAcquisitionTimeout);
+ if (!lk) {
+ return appendCommandStatus(
+ result, Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
+ }
- if (args.hasCustomData) {
- userObjBuilder.append("customData", args.customData);
- }
- userObjBuilder.append("roles", rolesVectorToBSONArray(args.roles));
+ status = requireAuthSchemaVersion26Final(txn, authzManager);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
- BSONObj userObj = userObjBuilder.obj();
- V2UserDocumentParser parser;
- status = parser.checkValidUserDocument(userObj);
+ // Role existence has to be checked after acquiring the update lock
+ for (size_t i = 0; i < args.roles.size(); ++i) {
+ BSONObj ignored;
+ status = authzManager->getRoleDescription(args.roles[i], false, &ignored);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
+ }
- stdx::unique_lock<stdx::timed_mutex> lk(getAuthzDataMutex(serviceContext),
- authzDataMutexAcquisitionTimeout);
- if (!lk) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
- }
+ audit::logCreateUser(ClientBasic::getCurrent(),
+ args.userName,
+ args.hasHashedPassword,
+ args.hasCustomData ? &args.customData : NULL,
+ args.roles);
+ status = insertPrivilegeDocument(txn, userObj, args.writeConcern);
+ return appendCommandStatus(result, status);
+ }
- status = requireAuthSchemaVersion26Final(txn, authzManager);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+ virtual void redactForLogging(mutablebson::Document* cmdObj) {
+ auth::redactPasswordData(cmdObj->root());
+ }
- // Role existence has to be checked after acquiring the update lock
- for (size_t i = 0; i < args.roles.size(); ++i) {
- BSONObj ignored;
- status = authzManager->getRoleDescription(args.roles[i], false, &ignored);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
- }
+} cmdCreateUser;
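// [Editorial sketch, not part of this commit] Command document shape parsed by
// parseCreateOrUpdateUserCommands for the run() above; field values are
// illustrative, and the constraints are the ones checked in run():
//   { createUser: "alice",
//     pwd: "<password>",               // forbidden for "$external" users
//     customData: { anything: true },  // optional
//     roles: [ { role: "readWrite", db: "reporting" } ],  // required
//     writeConcern: { w: "majority" } }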
- audit::logCreateUser(ClientBasic::getCurrent(),
- args.userName,
- args.hasHashedPassword,
- args.hasCustomData? &args.customData : NULL,
- args.roles);
- status = insertPrivilegeDocument(txn,
- userObj,
- args.writeConcern);
- return appendCommandStatus(result, status);
- }
+class CmdUpdateUser : public Command {
+public:
+ CmdUpdateUser() : Command("updateUser") {}
- virtual void redactForLogging(mutablebson::Document* cmdObj) {
- auth::redactPasswordData(cmdObj->root());
- }
+ virtual bool slaveOk() const {
+ return false;
+ }
- } cmdCreateUser;
+ virtual bool isWriteCommandForConfigServer() const {
+ return true;
+ }
- class CmdUpdateUser : public Command {
- public:
+ virtual void help(stringstream& ss) const {
+ ss << "Used to update a user, for example to change its password" << endl;
+ }
- CmdUpdateUser() : Command("updateUser") {}
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ return auth::checkAuthForUpdateUserCommand(client, dbname, cmdObj);
+ }
- virtual bool slaveOk() const {
- return false;
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ auth::CreateOrUpdateUserArgs args;
+ Status status = auth::parseCreateOrUpdateUserCommands(cmdObj, "updateUser", dbname, &args);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
}
- virtual bool isWriteCommandForConfigServer() const { return true; }
-
- virtual void help(stringstream& ss) const {
- ss << "Used to update a user, for example to change its password" << endl;
+ if (!args.hasHashedPassword && !args.hasCustomData && !args.hasRoles) {
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::BadValue,
+ "Must specify at least one field to update in updateUser"));
}
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- return auth::checkAuthForUpdateUserCommand(client, dbname, cmdObj);
+ if (args.hasHashedPassword && args.userName.getDB() == "$external") {
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::BadValue,
+ "Cannot set the password for users defined on the '$external' "
+ "database"));
}
- bool run(OperationContext* txn, const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
- auth::CreateOrUpdateUserArgs args;
- Status status = auth::parseCreateOrUpdateUserCommands(cmdObj,
- "updateUser",
- dbname,
- &args);
+ BSONObjBuilder updateSetBuilder;
+ if (args.hasHashedPassword) {
+ BSONObjBuilder credentialsBuilder(updateSetBuilder.subobjStart("credentials"));
+
+ AuthorizationManager* authzManager = getGlobalAuthorizationManager();
+ int authzVersion;
+ Status status = authzManager->getAuthorizationVersion(txn, &authzVersion);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
- if (!args.hasHashedPassword && !args.hasCustomData && !args.hasRoles) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::BadValue,
- "Must specify at least one field to update in updateUser"));
+ // Add SCRAM credentials for appropriate authSchemaVersions
+ if (authzVersion > AuthorizationManager::schemaVersion26Final) {
+ BSONObj scramCred = scram::generateCredentials(
+ args.hashedPassword, saslGlobalParams.scramIterationCount);
+ credentialsBuilder.append("SCRAM-SHA-1", scramCred);
+ } else { // Otherwise default to MONGODB-CR
+ credentialsBuilder.append("MONGODB-CR", args.hashedPassword);
}
+ credentialsBuilder.done();
+ }
+ if (args.hasCustomData) {
+ updateSetBuilder.append("customData", args.customData);
+ }
+ if (args.hasRoles) {
+ updateSetBuilder.append("roles", rolesVectorToBSONArray(args.roles));
+ }
- if (args.hasHashedPassword && args.userName.getDB() == "$external") {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::BadValue,
- "Cannot set the password for users defined on the '$external' "
- "database"));
- }
+ ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ stdx::unique_lock<stdx::timed_mutex> lk(getAuthzDataMutex(serviceContext),
+ authzDataMutexAcquisitionTimeout);
+ if (!lk) {
+ return appendCommandStatus(
+ result, Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
+ }
- BSONObjBuilder updateSetBuilder;
- if (args.hasHashedPassword) {
- BSONObjBuilder credentialsBuilder(updateSetBuilder.subobjStart("credentials"));
+ AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
+ status = requireAuthSchemaVersion26Final(txn, authzManager);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
- AuthorizationManager* authzManager = getGlobalAuthorizationManager();
- int authzVersion;
- Status status = authzManager->getAuthorizationVersion(txn, &authzVersion);
+
+ // Role existence has to be checked after acquiring the update lock
+ if (args.hasRoles) {
+ for (size_t i = 0; i < args.roles.size(); ++i) {
+ BSONObj ignored;
+ status = authzManager->getRoleDescription(args.roles[i], false, &ignored);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
-
- // Add SCRAM credentials for appropriate authSchemaVersions
- if (authzVersion > AuthorizationManager::schemaVersion26Final) {
- BSONObj scramCred = scram::generateCredentials(
- args.hashedPassword,
- saslGlobalParams.scramIterationCount);
- credentialsBuilder.append("SCRAM-SHA-1",scramCred);
- }
- else { // Otherwise default to MONGODB-CR
- credentialsBuilder.append("MONGODB-CR", args.hashedPassword);
- }
- credentialsBuilder.done();
- }
- if (args.hasCustomData) {
- updateSetBuilder.append("customData", args.customData);
- }
- if (args.hasRoles) {
- updateSetBuilder.append("roles", rolesVectorToBSONArray(args.roles));
}
+ }
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
- stdx::unique_lock<stdx::timed_mutex> lk(getAuthzDataMutex(serviceContext),
- authzDataMutexAcquisitionTimeout);
- if (!lk) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
- }
+ audit::logUpdateUser(ClientBasic::getCurrent(),
+ args.userName,
+ args.hasHashedPassword,
+ args.hasCustomData ? &args.customData : NULL,
+ args.hasRoles ? &args.roles : NULL);
- AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- status = requireAuthSchemaVersion26Final(txn, authzManager);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+ status = updatePrivilegeDocument(
+ txn, args.userName, BSON("$set" << updateSetBuilder.done()), args.writeConcern);
+ // Must invalidate even on bad status - what if the write succeeded but the GLE failed?
+ authzManager->invalidateUserByName(args.userName);
+ return appendCommandStatus(result, status);
+ }
+ virtual void redactForLogging(mutablebson::Document* cmdObj) {
+ auth::redactPasswordData(cmdObj->root());
+ }
- // Role existence has to be checked after acquiring the update lock
- if (args.hasRoles) {
- for (size_t i = 0; i < args.roles.size(); ++i) {
- BSONObj ignored;
- status = authzManager->getRoleDescription(args.roles[i], false, &ignored);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
- }
- }
+} cmdUpdateUser;
- audit::logUpdateUser(ClientBasic::getCurrent(),
- args.userName,
- args.hasHashedPassword,
- args.hasCustomData? &args.customData : NULL,
- args.hasRoles? &args.roles : NULL);
-
- status = updatePrivilegeDocument(txn,
- args.userName,
- BSON("$set" << updateSetBuilder.done()),
- args.writeConcern);
- // Must invalidate even on bad status - what if the write succeeded but the GLE failed?
- authzManager->invalidateUserByName(args.userName);
- return appendCommandStatus(result, status);
- }
+class CmdDropUser : public Command {
+public:
+ CmdDropUser() : Command("dropUser") {}
- virtual void redactForLogging(mutablebson::Document* cmdObj) {
- auth::redactPasswordData(cmdObj->root());
- }
+ virtual bool slaveOk() const {
+ return false;
+ }
- } cmdUpdateUser;
+ virtual bool isWriteCommandForConfigServer() const {
+ return true;
+ }
- class CmdDropUser : public Command {
- public:
+ virtual void help(stringstream& ss) const {
+ ss << "Drops a single user." << endl;
+ }
- CmdDropUser() : Command("dropUser") {}
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ return auth::checkAuthForDropUserCommand(client, dbname, cmdObj);
+ }
- virtual bool slaveOk() const {
- return false;
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ stdx::unique_lock<stdx::timed_mutex> lk(getAuthzDataMutex(serviceContext),
+ authzDataMutexAcquisitionTimeout);
+ if (!lk) {
+ return appendCommandStatus(
+ result, Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
+ }
+
+ AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
+ Status status = requireAuthSchemaVersion26Final(txn, authzManager);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
}
- virtual bool isWriteCommandForConfigServer() const { return true; }
- virtual void help(stringstream& ss) const {
- ss << "Drops a single user." << endl;
+ UserName userName;
+ BSONObj writeConcern;
+ status = auth::parseAndValidateDropUserCommand(cmdObj, dbname, &userName, &writeConcern);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
}
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- return auth::checkAuthForDropUserCommand(client, dbname, cmdObj);
+ int nMatched;
+
+ audit::logDropUser(ClientBasic::getCurrent(), userName);
+
+ status = removePrivilegeDocuments(txn,
+ BSON(AuthorizationManager::USER_NAME_FIELD_NAME
+ << userName.getUser()
+ << AuthorizationManager::USER_DB_FIELD_NAME
+ << userName.getDB()),
+ writeConcern,
+ &nMatched);
+ // Must invalidate even on bad status - what if the write succeeded but the GLE failed?
+ authzManager->invalidateUserByName(userName);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
}
- bool run(OperationContext* txn, const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
- stdx::unique_lock<stdx::timed_mutex> lk(getAuthzDataMutex(serviceContext),
- authzDataMutexAcquisitionTimeout);
- if (!lk) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
- }
+ if (nMatched == 0) {
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::UserNotFound,
+ str::stream() << "User '" << userName.getFullName() << "' not found"));
+ }
- AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- Status status = requireAuthSchemaVersion26Final(txn, authzManager);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+ return true;
+ }
+} cmdDropUser;
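// [Editorial sketch, not part of this commit] Command document shape parsed by
// parseAndValidateDropUserCommand above (values illustrative):
//   { dropUser: "alice", writeConcern: { w: "majority" } }
// Note that the user cache is invalidated before the status check, so even a
// failed getLastError cannot leave a stale cache entry behind.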
- UserName userName;
- BSONObj writeConcern;
- status = auth::parseAndValidateDropUserCommand(cmdObj,
- dbname,
- &userName,
- &writeConcern);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+class CmdDropAllUsersFromDatabase : public Command {
+public:
+ CmdDropAllUsersFromDatabase() : Command("dropAllUsersFromDatabase") {}
- int nMatched;
+ virtual bool slaveOk() const {
+ return false;
+ }
- audit::logDropUser(ClientBasic::getCurrent(), userName);
+ virtual bool isWriteCommandForConfigServer() const {
+ return true;
+ }
- status = removePrivilegeDocuments(
- txn,
- BSON(AuthorizationManager::USER_NAME_FIELD_NAME << userName.getUser() <<
- AuthorizationManager::USER_DB_FIELD_NAME << userName.getDB()),
- writeConcern,
- &nMatched);
- // Must invalidate even on bad status - what if the write succeeded but the GLE failed?
- authzManager->invalidateUserByName(userName);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+ virtual void help(stringstream& ss) const {
+ ss << "Drops all users for a single database." << endl;
+ }
- if (nMatched == 0) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::UserNotFound,
- str::stream() << "User '" << userName.getFullName() <<
- "' not found"));
- }
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ return auth::checkAuthForDropAllUsersFromDatabaseCommand(client, dbname);
+ }
- return true;
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ stdx::unique_lock<stdx::timed_mutex> lk(getAuthzDataMutex(serviceContext),
+ authzDataMutexAcquisitionTimeout);
+ if (!lk) {
+ return appendCommandStatus(
+ result, Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
+ }
+
+ AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
+ Status status = requireAuthSchemaVersion26Final(txn, authzManager);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
}
- } cmdDropUser;
+ BSONObj writeConcern;
+ status =
+ auth::parseAndValidateDropAllUsersFromDatabaseCommand(cmdObj, dbname, &writeConcern);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
- class CmdDropAllUsersFromDatabase : public Command {
- public:
+ int numRemoved;
- CmdDropAllUsersFromDatabase() : Command("dropAllUsersFromDatabase") {}
+ audit::logDropAllUsersFromDatabase(ClientBasic::getCurrent(), dbname);
- virtual bool slaveOk() const {
- return false;
+ status = removePrivilegeDocuments(txn,
+ BSON(AuthorizationManager::USER_DB_FIELD_NAME << dbname),
+ writeConcern,
+ &numRemoved);
+ // Must invalidate even on bad status - what if the write succeeded but the GLE failed?
+ authzManager->invalidateUsersFromDB(dbname);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
}
- virtual bool isWriteCommandForConfigServer() const { return true; }
+ result.append("n", numRemoved);
+ return true;
+ }
- virtual void help(stringstream& ss) const {
- ss << "Drops all users for a single database." << endl;
- }
+} cmdDropAllUsersFromDatabase;
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- return auth::checkAuthForDropAllUsersFromDatabaseCommand(client, dbname);
- }
+class CmdGrantRolesToUser : public Command {
+public:
+ CmdGrantRolesToUser() : Command("grantRolesToUser") {}
- bool run(OperationContext* txn, const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
- stdx::unique_lock<stdx::timed_mutex> lk(getAuthzDataMutex(serviceContext),
- authzDataMutexAcquisitionTimeout);
- if (!lk) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
- }
+ virtual bool slaveOk() const {
+ return false;
+ }
- AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- Status status = requireAuthSchemaVersion26Final(txn, authzManager);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+ virtual bool isWriteCommandForConfigServer() const {
+ return true;
+ }
- BSONObj writeConcern;
- status = auth::parseAndValidateDropAllUsersFromDatabaseCommand(cmdObj,
- dbname,
- &writeConcern);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+ virtual void help(stringstream& ss) const {
+ ss << "Grants roles to a user." << endl;
+ }
- int numRemoved;
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ return auth::checkAuthForGrantRolesToUserCommand(client, dbname, cmdObj);
+ }
- audit::logDropAllUsersFromDatabase(ClientBasic::getCurrent(), dbname);
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ stdx::unique_lock<stdx::timed_mutex> lk(getAuthzDataMutex(serviceContext),
+ authzDataMutexAcquisitionTimeout);
+ if (!lk) {
+ return appendCommandStatus(
+ result, Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
+ }
+
+ AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
+ Status status = requireAuthSchemaVersion26Final(txn, authzManager);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
+
+ std::string userNameString;
+ std::vector<RoleName> roles;
+ BSONObj writeConcern;
+ status = auth::parseRolePossessionManipulationCommands(
+ cmdObj, "grantRolesToUser", dbname, &userNameString, &roles, &writeConcern);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
- status = removePrivilegeDocuments(
- txn,
- BSON(AuthorizationManager::USER_DB_FIELD_NAME << dbname),
- writeConcern,
- &numRemoved);
- // Must invalidate even on bad status - what if the write succeeded but the GLE failed?
- authzManager->invalidateUsersFromDB(dbname);
+ UserName userName(userNameString, dbname);
+ unordered_set<RoleName> userRoles;
+ status = getCurrentUserRoles(txn, authzManager, userName, &userRoles);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
+
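+ // Verify that every role to be granted actually exists before touching the user document.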
+ for (vector<RoleName>::iterator it = roles.begin(); it != roles.end(); ++it) {
+ RoleName& roleName = *it;
+ BSONObj roleDoc;
+ status = authzManager->getRoleDescription(roleName, false, &roleDoc);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
- result.append("n", numRemoved);
- return true;
+ userRoles.insert(roleName);
}
- } cmdDropAllUsersFromDatabase;
+ audit::logGrantRolesToUser(ClientBasic::getCurrent(), userName, roles);
+ BSONArray newRolesBSONArray = roleSetToBSONArray(userRoles);
+ status = updatePrivilegeDocument(
+ txn, userName, BSON("$set" << BSON("roles" << newRolesBSONArray)), writeConcern);
+ // Must invalidate even on bad status - what if the write succeeded but the GLE failed?
+ authzManager->invalidateUserByName(userName);
+ return appendCommandStatus(result, status);
+ }
- class CmdGrantRolesToUser: public Command {
- public:
+} cmdGrantRolesToUser;
- CmdGrantRolesToUser() : Command("grantRolesToUser") {}
+class CmdRevokeRolesFromUser : public Command {
+public:
+ CmdRevokeRolesFromUser() : Command("revokeRolesFromUser") {}
- virtual bool slaveOk() const {
- return false;
- }
+ virtual bool slaveOk() const {
+ return false;
+ }
+
+ virtual bool isWriteCommandForConfigServer() const {
+ return true;
+ }
+
+ virtual void help(stringstream& ss) const {
+ ss << "Revokes roles from a user." << endl;
+ }
- virtual bool isWriteCommandForConfigServer() const { return true; }
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ return auth::checkAuthForRevokeRolesFromUserCommand(client, dbname, cmdObj);
+ }
- virtual void help(stringstream& ss) const {
- ss << "Grants roles to a user." << endl;
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ stdx::unique_lock<stdx::timed_mutex> lk(getAuthzDataMutex(serviceContext),
+ authzDataMutexAcquisitionTimeout);
+ if (!lk) {
+ return appendCommandStatus(
+ result, Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
+ }
+
+ AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
+ Status status = requireAuthSchemaVersion26Final(txn, authzManager);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
}
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- return auth::checkAuthForGrantRolesToUserCommand(client, dbname, cmdObj);
+ std::string userNameString;
+ std::vector<RoleName> roles;
+ BSONObj writeConcern;
+ status = auth::parseRolePossessionManipulationCommands(
+ cmdObj, "revokeRolesFromUser", dbname, &userNameString, &roles, &writeConcern);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
}
- bool run(OperationContext* txn, const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
- stdx::unique_lock<stdx::timed_mutex> lk(getAuthzDataMutex(serviceContext),
- authzDataMutexAcquisitionTimeout);
- if (!lk) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
- }
+ UserName userName(userNameString, dbname);
+ unordered_set<RoleName> userRoles;
+ status = getCurrentUserRoles(txn, authzManager, userName, &userRoles);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
- AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- Status status = requireAuthSchemaVersion26Final(txn, authzManager);
+ for (vector<RoleName>::iterator it = roles.begin(); it != roles.end(); ++it) {
+ RoleName& roleName = *it;
+ BSONObj roleDoc;
+ status = authzManager->getRoleDescription(roleName, false, &roleDoc);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
- std::string userNameString;
- std::vector<RoleName> roles;
- BSONObj writeConcern;
- status = auth::parseRolePossessionManipulationCommands(cmdObj,
- "grantRolesToUser",
- dbname,
- &userNameString,
- &roles,
- &writeConcern);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+ userRoles.erase(roleName);
+ }
- UserName userName(userNameString, dbname);
- unordered_set<RoleName> userRoles;
- status = getCurrentUserRoles(txn, authzManager, userName, &userRoles);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+ audit::logRevokeRolesFromUser(ClientBasic::getCurrent(), userName, roles);
+ BSONArray newRolesBSONArray = roleSetToBSONArray(userRoles);
+ status = updatePrivilegeDocument(
+ txn, userName, BSON("$set" << BSON("roles" << newRolesBSONArray)), writeConcern);
+ // Must invalidate even on bad status - what if the write succeeded but the GLE failed?
+ authzManager->invalidateUserByName(userName);
+ return appendCommandStatus(result, status);
+ }
- for (vector<RoleName>::iterator it = roles.begin(); it != roles.end(); ++it) {
- RoleName& roleName = *it;
- BSONObj roleDoc;
- status = authzManager->getRoleDescription(roleName, false, &roleDoc);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+} cmdRevokeRolesFromUser;
- userRoles.insert(roleName);
- }
+class CmdUsersInfo : public Command {
+public:
+ virtual bool slaveOk() const {
+ return false;
+ }
- audit::logGrantRolesToUser(ClientBasic::getCurrent(),
- userName,
- roles);
- BSONArray newRolesBSONArray = roleSetToBSONArray(userRoles);
- status = updatePrivilegeDocument(
- txn, userName, BSON("$set" << BSON("roles" << newRolesBSONArray)), writeConcern);
- // Must invalidate even on bad status - what if the write succeeded but the GLE failed?
- authzManager->invalidateUserByName(userName);
- return appendCommandStatus(result, status);
- }
+ virtual bool slaveOverrideOk() const {
+ return true;
+ }
- } cmdGrantRolesToUser;
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
- class CmdRevokeRolesFromUser: public Command {
- public:
+ CmdUsersInfo() : Command("usersInfo") {}
- CmdRevokeRolesFromUser() : Command("revokeRolesFromUser") {}
+ virtual void help(stringstream& ss) const {
+ ss << "Returns information about users." << endl;
+ }
- virtual bool slaveOk() const {
- return false;
- }
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ return auth::checkAuthForUsersInfoCommand(client, dbname, cmdObj);
+ }
- virtual bool isWriteCommandForConfigServer() const { return true; }
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ auth::UsersInfoArgs args;
+ Status status = auth::parseUsersInfoCommand(cmdObj, dbname, &args);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
- virtual void help(stringstream& ss) const {
- ss << "Revokes roles from a user." << endl;
+ status = requireAuthSchemaVersion26UpgradeOrFinal(txn, getGlobalAuthorizationManager());
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
}
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- return auth::checkAuthForRevokeRolesFromUserCommand(client, dbname, cmdObj);
+ if (args.allForDB && args.showPrivileges) {
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::IllegalOperation,
+ "Can only get privilege details on exact-match usersInfo "
+ "queries."));
}
- bool run(OperationContext* txn, const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
- stdx::unique_lock<stdx::timed_mutex> lk(getAuthzDataMutex(serviceContext),
- authzDataMutexAcquisitionTimeout);
- if (!lk) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
+ BSONArrayBuilder usersArrayBuilder;
+ if (args.showPrivileges) {
+ // If you want privileges you need to call getUserDescription on each user.
+ for (size_t i = 0; i < args.userNames.size(); ++i) {
+ BSONObj userDetails;
+ status = getGlobalAuthorizationManager()->getUserDescription(
+ txn, args.userNames[i], &userDetails);
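+ // Users that cannot be found are simply omitted from the output rather than
+ // failing the whole command.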
+ if (status.code() == ErrorCodes::UserNotFound) {
+ continue;
+ }
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
+ if (!args.showCredentials) {
+ // getUserDescription always includes credentials; strip them out when they were not requested
+ BSONObjBuilder userWithoutCredentials(usersArrayBuilder.subobjStart());
+ for (BSONObjIterator it(userDetails); it.more();) {
+ BSONElement e = it.next();
+ if (e.fieldNameStringData() != "credentials")
+ userWithoutCredentials.append(e);
+ }
+ userWithoutCredentials.doneFast();
+ } else {
+ usersArrayBuilder.append(userDetails);
+ }
}
-
- AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- Status status = requireAuthSchemaVersion26Final(txn, authzManager);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ } else {
+ // If you don't need privileges, you can just do a regular query on system.users
+ BSONObjBuilder queryBuilder;
+ if (args.allForDB) {
+ queryBuilder.append(AuthorizationManager::USER_DB_FIELD_NAME, dbname);
+ } else {
+ BSONArrayBuilder usersMatchArray;
+ for (size_t i = 0; i < args.userNames.size(); ++i) {
+ usersMatchArray.append(BSON(AuthorizationManager::USER_NAME_FIELD_NAME
+ << args.userNames[i].getUser()
+ << AuthorizationManager::USER_DB_FIELD_NAME
+ << args.userNames[i].getDB()));
+ }
+ queryBuilder.append("$or", usersMatchArray.arr());
}
- std::string userNameString;
- std::vector<RoleName> roles;
- BSONObj writeConcern;
- status = auth::parseRolePossessionManipulationCommands(cmdObj,
- "revokeRolesFromUser",
- dbname,
- &userNameString,
- &roles,
- &writeConcern);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ BSONObjBuilder projection;
+ if (!args.showCredentials) {
+ projection.append("credentials", 0);
}
+ const stdx::function<void(const BSONObj&)> function = stdx::bind(
+ appendBSONObjToBSONArrayBuilder, &usersArrayBuilder, stdx::placeholders::_1);
+ queryAuthzDocument(txn,
+ AuthorizationManager::usersCollectionNamespace,
+ queryBuilder.done(),
+ projection.done(),
+ function);
+ }
+ result.append("users", usersArrayBuilder.arr());
+ return true;
+ }
- UserName userName(userNameString, dbname);
- unordered_set<RoleName> userRoles;
- status = getCurrentUserRoles(txn, authzManager, userName, &userRoles);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+} cmdUsersInfo;
- for (vector<RoleName>::iterator it = roles.begin(); it != roles.end(); ++it) {
- RoleName& roleName = *it;
- BSONObj roleDoc;
- status = authzManager->getRoleDescription(roleName, false, &roleDoc);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+class CmdCreateRole : public Command {
+public:
+ CmdCreateRole() : Command("createRole") {}
- userRoles.erase(roleName);
- }
+ virtual bool slaveOk() const {
+ return false;
+ }
- audit::logRevokeRolesFromUser(ClientBasic::getCurrent(),
- userName,
- roles);
- BSONArray newRolesBSONArray = roleSetToBSONArray(userRoles);
- status = updatePrivilegeDocument(
- txn, userName, BSON("$set" << BSON("roles" << newRolesBSONArray)), writeConcern);
- // Must invalidate even on bad status - what if the write succeeded but the GLE failed?
- authzManager->invalidateUserByName(userName);
- return appendCommandStatus(result, status);
- }
+ virtual bool isWriteCommandForConfigServer() const {
+ return true;
+ }
- } cmdRevokeRolesFromUser;
+ virtual void help(stringstream& ss) const {
+ ss << "Adds a role to the system" << endl;
+ }
- class CmdUsersInfo: public Command {
- public:
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ return auth::checkAuthForCreateRoleCommand(client, dbname, cmdObj);
+ }
- virtual bool slaveOk() const {
- return false;
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ auth::CreateOrUpdateRoleArgs args;
+ Status status = auth::parseCreateOrUpdateRoleCommands(cmdObj, "createRole", dbname, &args);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
}
- virtual bool slaveOverrideOk() const {
- return true;
+ if (args.roleName.getRole().empty()) {
+ return appendCommandStatus(result,
+ Status(ErrorCodes::BadValue, "Role name must be non-empty"));
}
- virtual bool isWriteCommandForConfigServer() const { return false; }
+ if (args.roleName.getDB() == "local") {
+ return appendCommandStatus(
+ result, Status(ErrorCodes::BadValue, "Cannot create roles in the local database"));
+ }
- CmdUsersInfo() : Command("usersInfo") {}
+ if (args.roleName.getDB() == "$external") {
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::BadValue, "Cannot create roles in the $external database"));
+ }
- virtual void help(stringstream& ss) const {
- ss << "Returns information about users." << endl;
+ if (!args.hasRoles) {
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::BadValue, "\"createRole\" command requires a \"roles\" array"));
}
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- return auth::checkAuthForUsersInfoCommand(client, dbname, cmdObj);
+ if (!args.hasPrivileges) {
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::BadValue,
+ "\"createRole\" command requires a \"privileges\" array"));
}
- bool run(OperationContext* txn, const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
+ BSONObjBuilder roleObjBuilder;
- auth::UsersInfoArgs args;
- Status status = auth::parseUsersInfoCommand(cmdObj, dbname, &args);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
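+ // Role documents are keyed by "<db>.<role>", which makes role names unique per database.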
+ roleObjBuilder.append(
+ "_id", str::stream() << args.roleName.getDB() << "." << args.roleName.getRole());
+ roleObjBuilder.append(AuthorizationManager::ROLE_NAME_FIELD_NAME, args.roleName.getRole());
+ roleObjBuilder.append(AuthorizationManager::ROLE_DB_FIELD_NAME, args.roleName.getDB());
- status = requireAuthSchemaVersion26UpgradeOrFinal(txn,
- getGlobalAuthorizationManager());
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
-
- if (args.allForDB && args.showPrivileges) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::IllegalOperation,
- "Can only get privilege details on exact-match usersInfo "
- "queries."));
- }
-
- BSONArrayBuilder usersArrayBuilder;
- if (args.showPrivileges) {
- // If you want privileges you need to call getUserDescription on each user.
- for (size_t i = 0; i < args.userNames.size(); ++i) {
- BSONObj userDetails;
- status = getGlobalAuthorizationManager()->getUserDescription(
- txn, args.userNames[i], &userDetails);
- if (status.code() == ErrorCodes::UserNotFound) {
- continue;
- }
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
- if (!args.showCredentials) {
- // getUserDescription always includes credentials, need to strip it out
- BSONObjBuilder userWithoutCredentials(usersArrayBuilder.subobjStart());
- for (BSONObjIterator it(userDetails); it.more(); ) {
- BSONElement e = it.next();
- if (e.fieldNameStringData() != "credentials")
- userWithoutCredentials.append(e);
- }
- userWithoutCredentials.doneFast();
- } else {
- usersArrayBuilder.append(userDetails);
- }
- }
- } else {
- // If you don't need privileges, you can just do a regular query on system.users
- BSONObjBuilder queryBuilder;
- if (args.allForDB) {
- queryBuilder.append(AuthorizationManager::USER_DB_FIELD_NAME, dbname);
- } else {
- BSONArrayBuilder usersMatchArray;
- for (size_t i = 0; i < args.userNames.size(); ++i) {
- usersMatchArray.append(BSON(AuthorizationManager::USER_NAME_FIELD_NAME <<
- args.userNames[i].getUser() <<
- AuthorizationManager::USER_DB_FIELD_NAME <<
- args.userNames[i].getDB()));
- }
- queryBuilder.append("$or", usersMatchArray.arr());
+ BSONArray privileges;
+ status = privilegeVectorToBSONArray(args.privileges, &privileges);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
+ roleObjBuilder.append("privileges", privileges);
- }
+ roleObjBuilder.append("roles", rolesVectorToBSONArray(args.roles));
- BSONObjBuilder projection;
- if (!args.showCredentials) {
- projection.append("credentials", 0);
- }
- const stdx::function<void(const BSONObj&)> function = stdx::bind(
- appendBSONObjToBSONArrayBuilder,
- &usersArrayBuilder,
- stdx::placeholders::_1);
- queryAuthzDocument(txn,
- AuthorizationManager::usersCollectionNamespace,
- queryBuilder.done(),
- projection.done(),
- function);
- }
- result.append("users", usersArrayBuilder.arr());
- return true;
+ ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ stdx::unique_lock<stdx::timed_mutex> lk(getAuthzDataMutex(serviceContext),
+ authzDataMutexAcquisitionTimeout);
+ if (!lk) {
+ return appendCommandStatus(
+ result, Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
}
- } cmdUsersInfo;
-
- class CmdCreateRole: public Command {
- public:
+ AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
+ status = requireAuthSchemaVersion26Final(txn, authzManager);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
- CmdCreateRole() : Command("createRole") {}
+ // Role existence has to be checked after acquiring the update lock
+ status = checkOkayToGrantRolesToRole(args.roleName, args.roles, authzManager);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
- virtual bool slaveOk() const {
- return false;
+ status = checkOkayToGrantPrivilegesToRole(args.roleName, args.privileges);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
}
- virtual bool isWriteCommandForConfigServer() const { return true; }
+ audit::logCreateRole(ClientBasic::getCurrent(), args.roleName, args.roles, args.privileges);
- virtual void help(stringstream& ss) const {
- ss << "Adds a role to the system" << endl;
- }
+ status = insertRoleDocument(txn, roleObjBuilder.done(), args.writeConcern);
+ return appendCommandStatus(result, status);
+ }
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- return auth::checkAuthForCreateRoleCommand(client, dbname, cmdObj);
- }
+} cmdCreateRole;
- bool run(OperationContext* txn, const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
- auth::CreateOrUpdateRoleArgs args;
- Status status = auth::parseCreateOrUpdateRoleCommands(cmdObj,
- "createRole",
- dbname,
- &args);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+class CmdUpdateRole : public Command {
+public:
+ CmdUpdateRole() : Command("updateRole") {}
- if (args.roleName.getRole().empty()) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::BadValue, "Role name must be non-empty"));
- }
+ virtual bool slaveOk() const {
+ return false;
+ }
- if (args.roleName.getDB() == "local") {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::BadValue, "Cannot create roles in the local database"));
- }
+ virtual bool isWriteCommandForConfigServer() const {
+ return true;
+ }
- if (args.roleName.getDB() == "$external") {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::BadValue,
- "Cannot create roles in the $external database"));
- }
+ virtual void help(stringstream& ss) const {
+ ss << "Used to update a role" << endl;
+ }
- if (!args.hasRoles) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::BadValue,
- "\"createRole\" command requires a \"roles\" array"));
- }
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ return auth::checkAuthForUpdateRoleCommand(client, dbname, cmdObj);
+ }
- if (!args.hasPrivileges) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::BadValue,
- "\"createRole\" command requires a \"privileges\" array"));
- }
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ auth::CreateOrUpdateRoleArgs args;
+ Status status = auth::parseCreateOrUpdateRoleCommands(cmdObj, "updateRole", dbname, &args);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
- BSONObjBuilder roleObjBuilder;
+ if (!args.hasPrivileges && !args.hasRoles) {
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::BadValue,
+ "Must specify at least one field to update in updateRole"));
+ }
- roleObjBuilder.append("_id", str::stream() << args.roleName.getDB() << "." <<
- args.roleName.getRole());
- roleObjBuilder.append(AuthorizationManager::ROLE_NAME_FIELD_NAME,
- args.roleName.getRole());
- roleObjBuilder.append(AuthorizationManager::ROLE_DB_FIELD_NAME,
- args.roleName.getDB());
+ BSONObjBuilder updateSetBuilder;
+ if (args.hasPrivileges) {
BSONArray privileges;
status = privilegeVectorToBSONArray(args.privileges, &privileges);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
- roleObjBuilder.append("privileges", privileges);
+ updateSetBuilder.append("privileges", privileges);
+ }
- roleObjBuilder.append("roles", rolesVectorToBSONArray(args.roles));
+ if (args.hasRoles) {
+ updateSetBuilder.append("roles", rolesVectorToBSONArray(args.roles));
+ }
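+ // Only the fields supplied in the command are included in the $set, so omitted
+ // fields keep their current values.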
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
- stdx::unique_lock<stdx::timed_mutex> lk(getAuthzDataMutex(serviceContext),
- authzDataMutexAcquisitionTimeout);
- if (!lk) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
- }
+ ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ stdx::unique_lock<stdx::timed_mutex> lk(getAuthzDataMutex(serviceContext),
+ authzDataMutexAcquisitionTimeout);
+ if (!lk) {
+ return appendCommandStatus(
+ result, Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
+ }
- AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- status = requireAuthSchemaVersion26Final(txn, authzManager);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+ AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
+ status = requireAuthSchemaVersion26Final(txn, authzManager);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
+
+ // Role existence has to be checked after acquiring the update lock
+ BSONObj ignored;
+ status = authzManager->getRoleDescription(args.roleName, false, &ignored);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
- // Role existence has to be checked after acquiring the update lock
+ if (args.hasRoles) {
status = checkOkayToGrantRolesToRole(args.roleName, args.roles, authzManager);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
+ }
+ if (args.hasPrivileges) {
status = checkOkayToGrantPrivilegesToRole(args.roleName, args.privileges);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
+ }
- audit::logCreateRole(ClientBasic::getCurrent(),
- args.roleName,
- args.roles,
- args.privileges);
+ audit::logUpdateRole(ClientBasic::getCurrent(),
+ args.roleName,
+ args.hasRoles ? &args.roles : NULL,
+ args.hasPrivileges ? &args.privileges : NULL);
- status = insertRoleDocument(txn, roleObjBuilder.done(), args.writeConcern);
+ status = updateRoleDocument(
+ txn, args.roleName, BSON("$set" << updateSetBuilder.done()), args.writeConcern);
+ // Must invalidate even on bad status - what if the write succeeded but the GLE failed?
+ authzManager->invalidateUserCache();
+ return appendCommandStatus(result, status);
+ }
+} cmdUpdateRole;
+
+class CmdGrantPrivilegesToRole : public Command {
+public:
+ CmdGrantPrivilegesToRole() : Command("grantPrivilegesToRole") {}
+
+ virtual bool slaveOk() const {
+ return false;
+ }
+
+ virtual bool isWriteCommandForConfigServer() const {
+ return true;
+ }
+
+ virtual void help(stringstream& ss) const {
+ ss << "Grants privileges to a role" << endl;
+ }
+
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ return auth::checkAuthForGrantPrivilegesToRoleCommand(client, dbname, cmdObj);
+ }
+
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ stdx::unique_lock<stdx::timed_mutex> lk(getAuthzDataMutex(serviceContext),
+ authzDataMutexAcquisitionTimeout);
+ if (!lk) {
+ return appendCommandStatus(
+ result, Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
+ }
+
+ AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
+ Status status = requireAuthSchemaVersion26Final(txn, authzManager);
+ if (!status.isOK()) {
return appendCommandStatus(result, status);
}
- } cmdCreateRole;
+ RoleName roleName;
+ PrivilegeVector privilegesToAdd;
+ BSONObj writeConcern;
+ status = auth::parseAndValidateRolePrivilegeManipulationCommands(
+ cmdObj, "grantPrivilegesToRole", dbname, &roleName, &privilegesToAdd, &writeConcern);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
- class CmdUpdateRole: public Command {
- public:
+ if (RoleGraph::isBuiltinRole(roleName)) {
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::InvalidRoleModification,
+ str::stream() << roleName.getFullName()
+ << " is a built-in role and cannot be modified."));
+ }
- CmdUpdateRole() : Command("updateRole") {}
+ status = checkOkayToGrantPrivilegesToRole(roleName, privilegesToAdd);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
- virtual bool slaveOk() const {
- return false;
+ BSONObj roleDoc;
+ status = authzManager->getRoleDescription(roleName, true, &roleDoc);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
}
- virtual bool isWriteCommandForConfigServer() const { return true; }
+ PrivilegeVector privileges;
+ status = auth::parseAndValidatePrivilegeArray(BSONArray(roleDoc["privileges"].Obj()),
+ &privileges);
- virtual void help(stringstream& ss) const {
- ss << "Used to update a role" << endl;
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
}
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- return auth::checkAuthForUpdateRoleCommand(client, dbname, cmdObj);
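+ // Fold each granted privilege into the role's existing privilege vector.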
+ for (PrivilegeVector::iterator it = privilegesToAdd.begin(); it != privilegesToAdd.end();
+ ++it) {
+ Privilege::addPrivilegeToPrivilegeVector(&privileges, *it);
}
- bool run(OperationContext* txn, const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
- auth::CreateOrUpdateRoleArgs args;
- Status status = auth::parseCreateOrUpdateRoleCommands(cmdObj,
- "updateRole",
- dbname,
- &args);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+ // Build up update modifier object to $set privileges.
+ mutablebson::Document updateObj;
+ mutablebson::Element setElement = updateObj.makeElementObject("$set");
+ status = updateObj.root().pushBack(setElement);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
+ mutablebson::Element privilegesElement = updateObj.makeElementArray("privileges");
+ status = setElement.pushBack(privilegesElement);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
+ status = authzManager->getBSONForPrivileges(privileges, privilegesElement);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
- if (!args.hasPrivileges && !args.hasRoles) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::BadValue,
- "Must specify at least one field to update in updateRole"));
- }
+ BSONObjBuilder updateBSONBuilder;
+ updateObj.writeTo(&updateBSONBuilder);
- BSONObjBuilder updateSetBuilder;
+ audit::logGrantPrivilegesToRole(ClientBasic::getCurrent(), roleName, privilegesToAdd);
- if (args.hasPrivileges) {
- BSONArray privileges;
- status = privilegeVectorToBSONArray(args.privileges, &privileges);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
- updateSetBuilder.append("privileges", privileges);
- }
+ status = updateRoleDocument(txn, roleName, updateBSONBuilder.done(), writeConcern);
+ // Must invalidate even on bad status - what if the write succeeded but the GLE failed?
+ authzManager->invalidateUserCache();
+ return appendCommandStatus(result, status);
+ }
- if (args.hasRoles) {
- updateSetBuilder.append("roles", rolesVectorToBSONArray(args.roles));
- }
+} cmdGrantPrivilegesToRole;
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
- stdx::unique_lock<stdx::timed_mutex> lk(getAuthzDataMutex(serviceContext),
- authzDataMutexAcquisitionTimeout);
- if (!lk) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
- }
+class CmdRevokePrivilegesFromRole : public Command {
+public:
+ CmdRevokePrivilegesFromRole() : Command("revokePrivilegesFromRole") {}
- AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- status = requireAuthSchemaVersion26Final(txn, authzManager);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+ virtual bool slaveOk() const {
+ return false;
+ }
- // Role existence has to be checked after acquiring the update lock
- BSONObj ignored;
- status = authzManager->getRoleDescription(args.roleName, false, &ignored);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+ virtual bool isWriteCommandForConfigServer() const {
+ return true;
+ }
- if (args.hasRoles) {
- status = checkOkayToGrantRolesToRole(args.roleName, args.roles, authzManager);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
- }
+ virtual void help(stringstream& ss) const {
+ ss << "Revokes privileges from a role" << endl;
+ }
- if (args.hasPrivileges) {
- status = checkOkayToGrantPrivilegesToRole(args.roleName, args.privileges);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
- }
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ return auth::checkAuthForRevokePrivilegesFromRoleCommand(client, dbname, cmdObj);
+ }
- audit::logUpdateRole(ClientBasic::getCurrent(),
- args.roleName,
- args.hasRoles? &args.roles : NULL,
- args.hasPrivileges? &args.privileges : NULL);
-
- status = updateRoleDocument(txn,
- args.roleName,
- BSON("$set" << updateSetBuilder.done()),
- args.writeConcern);
- // Must invalidate even on bad status - what if the write succeeded but the GLE failed?
- authzManager->invalidateUserCache();
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ stdx::unique_lock<stdx::timed_mutex> lk(getAuthzDataMutex(serviceContext),
+ authzDataMutexAcquisitionTimeout);
+ if (!lk) {
+ return appendCommandStatus(
+ result, Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
+ }
+
+ AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
+ Status status = requireAuthSchemaVersion26Final(txn, authzManager);
+ if (!status.isOK()) {
return appendCommandStatus(result, status);
}
- } cmdUpdateRole;
-
- class CmdGrantPrivilegesToRole: public Command {
- public:
- CmdGrantPrivilegesToRole() : Command("grantPrivilegesToRole") {}
-
- virtual bool slaveOk() const {
- return false;
+ RoleName roleName;
+ PrivilegeVector privilegesToRemove;
+ BSONObj writeConcern;
+ status = auth::parseAndValidateRolePrivilegeManipulationCommands(cmdObj,
+ "revokePrivilegesFromRole",
+ dbname,
+ &roleName,
+ &privilegesToRemove,
+ &writeConcern);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
}
- virtual bool isWriteCommandForConfigServer() const { return true; }
-
- virtual void help(stringstream& ss) const {
- ss << "Grants privileges to a role" << endl;
+ if (RoleGraph::isBuiltinRole(roleName)) {
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::InvalidRoleModification,
+ str::stream() << roleName.getFullName()
+ << " is a built-in role and cannot be modified."));
}
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- return auth::checkAuthForGrantPrivilegesToRoleCommand(client, dbname, cmdObj);
+ BSONObj roleDoc;
+ status = authzManager->getRoleDescription(roleName, true, &roleDoc);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
}
- bool run(OperationContext* txn, const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
- stdx::unique_lock<stdx::timed_mutex> lk(getAuthzDataMutex(serviceContext),
- authzDataMutexAcquisitionTimeout);
- if (!lk) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
- }
-
- AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- Status status = requireAuthSchemaVersion26Final(txn, authzManager);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+ PrivilegeVector privileges;
+ status = auth::parseAndValidatePrivilegeArray(BSONArray(roleDoc["privileges"].Obj()),
+ &privileges);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
- RoleName roleName;
- PrivilegeVector privilegesToAdd;
- BSONObj writeConcern;
- status = auth::parseAndValidateRolePrivilegeManipulationCommands(
- cmdObj,
- "grantPrivilegesToRole",
- dbname,
- &roleName,
- &privilegesToAdd,
- &writeConcern);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
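+ // For each privilege being revoked, subtract its actions from the privilege with the
+ // matching resource pattern; a privilege left with no actions is removed entirely.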
+ for (PrivilegeVector::iterator itToRm = privilegesToRemove.begin();
+ itToRm != privilegesToRemove.end();
+ ++itToRm) {
+ for (PrivilegeVector::iterator curIt = privileges.begin(); curIt != privileges.end();
+ ++curIt) {
+ if (curIt->getResourcePattern() == itToRm->getResourcePattern()) {
+ curIt->removeActions(itToRm->getActions());
+ if (curIt->getActions().empty()) {
+ privileges.erase(curIt);
+ }
+ break;
+ }
}
+ }
- if (RoleGraph::isBuiltinRole(roleName)) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::InvalidRoleModification,
- str::stream() << roleName.getFullName() <<
- " is a built-in role and cannot be modified."));
- }
+ // Build up update modifier object to $set privileges.
+ mutablebson::Document updateObj;
+ mutablebson::Element setElement = updateObj.makeElementObject("$set");
+ status = updateObj.root().pushBack(setElement);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
+ mutablebson::Element privilegesElement = updateObj.makeElementArray("privileges");
+ status = setElement.pushBack(privilegesElement);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
+ status = authzManager->getBSONForPrivileges(privileges, privilegesElement);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
- status = checkOkayToGrantPrivilegesToRole(roleName, privilegesToAdd);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+ audit::logRevokePrivilegesFromRole(ClientBasic::getCurrent(), roleName, privilegesToRemove);
- BSONObj roleDoc;
- status = authzManager->getRoleDescription(roleName, true, &roleDoc);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+ BSONObjBuilder updateBSONBuilder;
+ updateObj.writeTo(&updateBSONBuilder);
+ status = updateRoleDocument(txn, roleName, updateBSONBuilder.done(), writeConcern);
+ // Must invalidate even on bad status - what if the write succeeded but the GLE failed?
+ authzManager->invalidateUserCache();
+ return appendCommandStatus(result, status);
+ }
- PrivilegeVector privileges;
- status = auth::parseAndValidatePrivilegeArray(BSONArray(roleDoc["privileges"].Obj()),
- &privileges);
+} cmdRevokePrivilegesFromRole;
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+class CmdGrantRolesToRole : public Command {
+public:
+ CmdGrantRolesToRole() : Command("grantRolesToRole") {}
- for (PrivilegeVector::iterator it = privilegesToAdd.begin();
- it != privilegesToAdd.end(); ++it) {
- Privilege::addPrivilegeToPrivilegeVector(&privileges, *it);
- }
+ virtual bool slaveOk() const {
+ return false;
+ }
- // Build up update modifier object to $set privileges.
- mutablebson::Document updateObj;
- mutablebson::Element setElement = updateObj.makeElementObject("$set");
- status = updateObj.root().pushBack(setElement);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
- mutablebson::Element privilegesElement = updateObj.makeElementArray("privileges");
- status = setElement.pushBack(privilegesElement);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
- status = authzManager->getBSONForPrivileges(privileges, privilegesElement);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+ virtual bool isWriteCommandForConfigServer() const {
+ return true;
+ }
- BSONObjBuilder updateBSONBuilder;
- updateObj.writeTo(&updateBSONBuilder);
+ virtual void help(stringstream& ss) const {
+ ss << "Grants roles to another role." << endl;
+ }
- audit::logGrantPrivilegesToRole(ClientBasic::getCurrent(),
- roleName,
- privilegesToAdd);
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ return auth::checkAuthForGrantRolesToRoleCommand(client, dbname, cmdObj);
+ }
- status = updateRoleDocument(
- txn,
- roleName,
- updateBSONBuilder.done(),
- writeConcern);
- // Must invalidate even on bad status - what if the write succeeded but the GLE failed?
- authzManager->invalidateUserCache();
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ std::string roleNameString;
+ std::vector<RoleName> rolesToAdd;
+ BSONObj writeConcern;
+ Status status = auth::parseRolePossessionManipulationCommands(
+ cmdObj, "grantRolesToRole", dbname, &roleNameString, &rolesToAdd, &writeConcern);
+ if (!status.isOK()) {
return appendCommandStatus(result, status);
}
- } cmdGrantPrivilegesToRole;
-
- class CmdRevokePrivilegesFromRole: public Command {
- public:
+ RoleName roleName(roleNameString, dbname);
+ if (RoleGraph::isBuiltinRole(roleName)) {
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::InvalidRoleModification,
+ str::stream() << roleName.getFullName()
+ << " is a built-in role and cannot be modified."));
+ }
- CmdRevokePrivilegesFromRole() : Command("revokePrivilegesFromRole") {}
+ ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ stdx::unique_lock<stdx::timed_mutex> lk(getAuthzDataMutex(serviceContext),
+ authzDataMutexAcquisitionTimeout);
+ if (!lk) {
+ return appendCommandStatus(
+ result, Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
+ }
- virtual bool slaveOk() const {
- return false;
+ AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
+ status = requireAuthSchemaVersion26Final(txn, authzManager);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
}
- virtual bool isWriteCommandForConfigServer() const { return true; }
+ // Role existence has to be checked after acquiring the update lock
+ BSONObj roleDoc;
+ status = authzManager->getRoleDescription(roleName, false, &roleDoc);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
- virtual void help(stringstream& ss) const {
- ss << "Revokes privileges from a role" << endl;
+ // Check for cycles
+ status = checkOkayToGrantRolesToRole(roleName, rolesToAdd, authzManager);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
}
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- return auth::checkAuthForRevokePrivilegesFromRoleCommand(client, dbname, cmdObj);
+ // Add new roles to existing roles
+ std::vector<RoleName> directRoles;
+ status = auth::parseRoleNamesFromBSONArray(
+ BSONArray(roleDoc["roles"].Obj()), roleName.getDB(), &directRoles);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
+ for (vector<RoleName>::iterator it = rolesToAdd.begin(); it != rolesToAdd.end(); ++it) {
+ const RoleName& roleToAdd = *it;
+ if (!sequenceContains(directRoles, roleToAdd)) // Don't double-add role
+ directRoles.push_back(*it);
}
- bool run(OperationContext* txn, const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
- stdx::unique_lock<stdx::timed_mutex> lk(getAuthzDataMutex(serviceContext),
- authzDataMutexAcquisitionTimeout);
- if (!lk) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
- }
+ audit::logGrantRolesToRole(ClientBasic::getCurrent(), roleName, rolesToAdd);
- AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- Status status = requireAuthSchemaVersion26Final(txn, authzManager);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+ status =
+ updateRoleDocument(txn,
+ roleName,
+ BSON("$set" << BSON("roles" << rolesVectorToBSONArray(directRoles))),
+ writeConcern);
+ // Must invalidate even on bad status - what if the write succeeded but the GLE failed?
+ authzManager->invalidateUserCache();
+ return appendCommandStatus(result, status);
+ }
- RoleName roleName;
- PrivilegeVector privilegesToRemove;
- BSONObj writeConcern;
- status = auth::parseAndValidateRolePrivilegeManipulationCommands(
- cmdObj,
- "revokePrivilegesFromRole",
- dbname,
- &roleName,
- &privilegesToRemove,
- &writeConcern);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+} cmdGrantRolesToRole;
- if (RoleGraph::isBuiltinRole(roleName)) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::InvalidRoleModification,
- str::stream() << roleName.getFullName() <<
- " is a built-in role and cannot be modified."));
- }
+class CmdRevokeRolesFromRole : public Command {
+public:
+ CmdRevokeRolesFromRole() : Command("revokeRolesFromRole") {}
- BSONObj roleDoc;
- status = authzManager->getRoleDescription(roleName, true, &roleDoc);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+ virtual bool slaveOk() const {
+ return false;
+ }
- PrivilegeVector privileges;
- status = auth::parseAndValidatePrivilegeArray(BSONArray(roleDoc["privileges"].Obj()),
- &privileges);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+ virtual bool isWriteCommandForConfigServer() const {
+ return true;
+ }
- for (PrivilegeVector::iterator itToRm = privilegesToRemove.begin();
- itToRm != privilegesToRemove.end(); ++itToRm) {
- for (PrivilegeVector::iterator curIt = privileges.begin();
- curIt != privileges.end(); ++curIt) {
- if (curIt->getResourcePattern() == itToRm->getResourcePattern()) {
- curIt->removeActions(itToRm->getActions());
- if (curIt->getActions().empty()) {
- privileges.erase(curIt);
- }
- break;
- }
- }
- }
+ virtual void help(stringstream& ss) const {
+ ss << "Revokes roles from another role." << endl;
+ }
- // Build up update modifier object to $set privileges.
- mutablebson::Document updateObj;
- mutablebson::Element setElement = updateObj.makeElementObject("$set");
- status = updateObj.root().pushBack(setElement);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
- mutablebson::Element privilegesElement = updateObj.makeElementArray("privileges");
- status = setElement.pushBack(privilegesElement);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
- status = authzManager->getBSONForPrivileges(privileges, privilegesElement);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ return auth::checkAuthForRevokeRolesFromRoleCommand(client, dbname, cmdObj);
+ }
- audit::logRevokePrivilegesFromRole(ClientBasic::getCurrent(),
- roleName,
- privilegesToRemove);
-
- BSONObjBuilder updateBSONBuilder;
- updateObj.writeTo(&updateBSONBuilder);
- status = updateRoleDocument(
- txn,
- roleName,
- updateBSONBuilder.done(),
- writeConcern);
- // Must invalidate even on bad status - what if the write succeeded but the GLE failed?
- authzManager->invalidateUserCache();
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ stdx::unique_lock<stdx::timed_mutex> lk(getAuthzDataMutex(serviceContext),
+ authzDataMutexAcquisitionTimeout);
+ if (!lk) {
+ return appendCommandStatus(
+ result, Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
+ }
+
+ AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
+ Status status = requireAuthSchemaVersion26Final(txn, authzManager);
+ if (!status.isOK()) {
return appendCommandStatus(result, status);
}
- } cmdRevokePrivilegesFromRole;
-
- class CmdGrantRolesToRole: public Command {
- public:
-
- CmdGrantRolesToRole() : Command("grantRolesToRole") {}
-
- virtual bool slaveOk() const {
- return false;
+ std::string roleNameString;
+ std::vector<RoleName> rolesToRemove;
+ BSONObj writeConcern;
+ status = auth::parseRolePossessionManipulationCommands(
+ cmdObj, "revokeRolesFromRole", dbname, &roleNameString, &rolesToRemove, &writeConcern);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
}
- virtual bool isWriteCommandForConfigServer() const { return true; }
+ RoleName roleName(roleNameString, dbname);
+ if (RoleGraph::isBuiltinRole(roleName)) {
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::InvalidRoleModification,
+ str::stream() << roleName.getFullName()
+ << " is a built-in role and cannot be modified."));
+ }
- virtual void help(stringstream& ss) const {
- ss << "Grants roles to another role." << endl;
+ BSONObj roleDoc;
+ status = authzManager->getRoleDescription(roleName, false, &roleDoc);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
}
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- return auth::checkAuthForGrantRolesToRoleCommand(client, dbname, cmdObj);
+ std::vector<RoleName> roles;
+ status = auth::parseRoleNamesFromBSONArray(
+ BSONArray(roleDoc["roles"].Obj()), roleName.getDB(), &roles);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
}
- bool run(OperationContext* txn, const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
- std::string roleNameString;
- std::vector<RoleName> rolesToAdd;
- BSONObj writeConcern;
- Status status = auth::parseRolePossessionManipulationCommands(
- cmdObj,
- "grantRolesToRole",
- dbname,
- &roleNameString,
- &rolesToAdd,
- &writeConcern);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ for (vector<RoleName>::const_iterator it = rolesToRemove.begin(); it != rolesToRemove.end();
+ ++it) {
+ vector<RoleName>::iterator itToRm = std::find(roles.begin(), roles.end(), *it);
+ if (itToRm != roles.end()) {
+ roles.erase(itToRm);
}
+ }
- RoleName roleName(roleNameString, dbname);
- if (RoleGraph::isBuiltinRole(roleName)) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::InvalidRoleModification,
- str::stream() << roleName.getFullName() <<
- " is a built-in role and cannot be modified."));
- }
+ audit::logRevokeRolesFromRole(ClientBasic::getCurrent(), roleName, rolesToRemove);
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
- stdx::unique_lock<stdx::timed_mutex> lk(getAuthzDataMutex(serviceContext),
- authzDataMutexAcquisitionTimeout);
- if (!lk) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
- }
+ status = updateRoleDocument(txn,
+ roleName,
+ BSON("$set" << BSON("roles" << rolesVectorToBSONArray(roles))),
+ writeConcern);
+ // Must invalidate even on bad status - what if the write succeeded but the GLE failed?
+ authzManager->invalidateUserCache();
+ return appendCommandStatus(result, status);
+ }
- AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- status = requireAuthSchemaVersion26Final(txn, authzManager);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+} cmdRevokeRolesFromRole;
- // Role existence has to be checked after acquiring the update lock
- BSONObj roleDoc;
- status = authzManager->getRoleDescription(roleName, false, &roleDoc);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+class CmdDropRole : public Command {
+public:
+ CmdDropRole() : Command("dropRole") {}
- // Check for cycles
- status = checkOkayToGrantRolesToRole(roleName, rolesToAdd, authzManager);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+ virtual bool slaveOk() const {
+ return false;
+ }
- // Add new roles to existing roles
- std::vector<RoleName> directRoles;
- status = auth::parseRoleNamesFromBSONArray(BSONArray(roleDoc["roles"].Obj()),
- roleName.getDB(),
- &directRoles);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
- for (vector<RoleName>::iterator it = rolesToAdd.begin(); it != rolesToAdd.end(); ++it) {
- const RoleName& roleToAdd = *it;
- if (!sequenceContains(directRoles, roleToAdd)) // Don't double-add role
- directRoles.push_back(*it);
- }
+ virtual bool isWriteCommandForConfigServer() const {
+ return true;
+ }
- audit::logGrantRolesToRole(ClientBasic::getCurrent(),
- roleName,
- rolesToAdd);
-
- status = updateRoleDocument(
- txn,
- roleName,
- BSON("$set" << BSON("roles" << rolesVectorToBSONArray(directRoles))),
- writeConcern);
- // Must invalidate even on bad status - what if the write succeeded but the GLE failed?
- authzManager->invalidateUserCache();
- return appendCommandStatus(result, status);
- }
+ virtual void help(stringstream& ss) const {
+ ss << "Drops a single role. Before deleting the role completely it must remove it "
+ "from any users or roles that reference it. If any errors occur in the middle "
+ "of that process it's possible to be left in a state where the role has been "
+ "removed from some user/roles but otherwise still exists." << endl;
+ }
- } cmdGrantRolesToRole;
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ return auth::checkAuthForDropRoleCommand(client, dbname, cmdObj);
+ }
- class CmdRevokeRolesFromRole: public Command {
- public:
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ stdx::unique_lock<stdx::timed_mutex> lk(getAuthzDataMutex(serviceContext),
+ authzDataMutexAcquisitionTimeout);
+ if (!lk) {
+ return appendCommandStatus(
+ result, Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
+ }
+
+ AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
+ Status status = requireAuthSchemaVersion26Final(txn, authzManager);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
- CmdRevokeRolesFromRole() : Command("revokeRolesFromRole") {}
+ RoleName roleName;
+ BSONObj writeConcern;
+ status = auth::parseDropRoleCommand(cmdObj, dbname, &roleName, &writeConcern);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
- virtual bool slaveOk() const {
- return false;
+ if (RoleGraph::isBuiltinRole(roleName)) {
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::InvalidRoleModification,
+ str::stream() << roleName.getFullName()
+ << " is a built-in role and cannot be modified."));
}
- virtual bool isWriteCommandForConfigServer() const { return true; }
+ BSONObj roleDoc;
+ status = authzManager->getRoleDescription(roleName, false, &roleDoc);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
- virtual void help(stringstream& ss) const {
- ss << "Revokes roles from another role." << endl;
+ // Remove this role from all users
+ int nMatched;
+ status = updateAuthzDocuments(
+ txn,
+ AuthorizationManager::usersCollectionNamespace,
+ BSON("roles" << BSON("$elemMatch" << BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
+ << roleName.getRole()
+ << AuthorizationManager::ROLE_DB_FIELD_NAME
+ << roleName.getDB()))),
+ BSON("$pull" << BSON("roles" << BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
+ << roleName.getRole()
+ << AuthorizationManager::ROLE_DB_FIELD_NAME
+ << roleName.getDB()))),
+ false,
+ true,
+ writeConcern,
+ &nMatched);
+ // Must invalidate even on bad status - what if the write succeeded but the GLE failed?
+ authzManager->invalidateUserCache();
+ if (!status.isOK()) {
+ ErrorCodes::Error code = status.code() == ErrorCodes::UnknownError
+ ? ErrorCodes::UserModificationFailed
+ : status.code();
+ return appendCommandStatus(
+ result,
+ Status(code,
+ str::stream() << "Failed to remove role " << roleName.getFullName()
+ << " from all users: " << status.reason()));
+ }
+
+ // Remove this role from all other roles
+ status = updateAuthzDocuments(
+ txn,
+ AuthorizationManager::rolesCollectionNamespace,
+ BSON("roles" << BSON("$elemMatch" << BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
+ << roleName.getRole()
+ << AuthorizationManager::ROLE_DB_FIELD_NAME
+ << roleName.getDB()))),
+ BSON("$pull" << BSON("roles" << BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
+ << roleName.getRole()
+ << AuthorizationManager::ROLE_DB_FIELD_NAME
+ << roleName.getDB()))),
+ false,
+ true,
+ writeConcern,
+ &nMatched);
+ // Must invalidate even on bad status - what if the write succeeded but the GLE failed?
+ authzManager->invalidateUserCache();
+ if (!status.isOK()) {
+ ErrorCodes::Error code = status.code() == ErrorCodes::UnknownError
+ ? ErrorCodes::RoleModificationFailed
+ : status.code();
+ return appendCommandStatus(
+ result,
+ Status(code,
+ str::stream() << "Removed role " << roleName.getFullName()
+ << " from all users but failed to remove from all roles: "
+ << status.reason()));
+ }
+
+ audit::logDropRole(ClientBasic::getCurrent(), roleName);
+ // Finally, remove the actual role document
+ status = removeRoleDocuments(txn,
+ BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
+ << roleName.getRole()
+ << AuthorizationManager::ROLE_DB_FIELD_NAME
+ << roleName.getDB()),
+ writeConcern,
+ &nMatched);
+ // Must invalidate even on bad status - what if the write succeeded but the GLE failed?
+ authzManager->invalidateUserCache();
+ if (!status.isOK()) {
+ return appendCommandStatus(
+ result,
+ Status(status.code(),
+ str::stream() << "Removed role " << roleName.getFullName()
+ << " from all users and roles but failed to actually delete"
+ " the role itself: " << status.reason()));
}
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- return auth::checkAuthForRevokeRolesFromRoleCommand(client, dbname, cmdObj);
+ dassert(nMatched == 0 || nMatched == 1);
+ if (nMatched == 0) {
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::RoleNotFound,
+ str::stream() << "Role '" << roleName.getFullName() << "' not found"));
}
- bool run(OperationContext* txn, const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
- stdx::unique_lock<stdx::timed_mutex> lk(getAuthzDataMutex(serviceContext),
- authzDataMutexAcquisitionTimeout);
- if (!lk) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
- }
+ return true;
+ }
- AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- Status status = requireAuthSchemaVersion26Final(txn, authzManager);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+} cmdDropRole;
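
For orientation, a minimal sketch of the filter/update pair the user-cleanup step of
dropRole builds above, assuming the ROLE_NAME_FIELD_NAME/ROLE_DB_FIELD_NAME constants
expand to "role" and "db" (the usual shape of role references in system.users):

    // Sketch under the field-name assumption above; for RoleName("readWriteX", "test")
    // this is what the BSON() builders in run() produce.
    BSONObj roleRef = BSON("role" << roleName.getRole() << "db" << roleName.getDB());
    BSONObj filter = BSON("roles" << BSON("$elemMatch" << roleRef));  // users referencing it
    BSONObj update = BSON("$pull" << BSON("roles" << roleRef));       // drop that array element
    // The same pair is reissued against admin.system.roles before the role document
    // itself is removed, matching the three-step sequence the help text describes.
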
- std::string roleNameString;
- std::vector<RoleName> rolesToRemove;
- BSONObj writeConcern;
- status = auth::parseRolePossessionManipulationCommands(cmdObj,
- "revokeRolesFromRole",
- dbname,
- &roleNameString,
- &rolesToRemove,
- &writeConcern);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+class CmdDropAllRolesFromDatabase : public Command {
+public:
+ CmdDropAllRolesFromDatabase() : Command("dropAllRolesFromDatabase") {}
- RoleName roleName(roleNameString, dbname);
- if (RoleGraph::isBuiltinRole(roleName)) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::InvalidRoleModification,
- str::stream() << roleName.getFullName() <<
- " is a built-in role and cannot be modified."));
- }
+ virtual bool slaveOk() const {
+ return false;
+ }
- BSONObj roleDoc;
- status = authzManager->getRoleDescription(roleName, false, &roleDoc);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+ virtual bool isWriteCommandForConfigServer() const {
+ return true;
+ }
- std::vector<RoleName> roles;
- status = auth::parseRoleNamesFromBSONArray(BSONArray(roleDoc["roles"].Obj()),
- roleName.getDB(),
- &roles);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+ virtual void help(stringstream& ss) const {
+ ss << "Drops all roles from the given database. Before deleting the roles completely "
+ "it must remove them from any users or other roles that reference them. If any "
+ "errors occur in the middle of that process it's possible to be left in a state "
+ "where the roles have been removed from some user/roles but otherwise still "
+ "exist." << endl;
+ }
- for (vector<RoleName>::const_iterator it = rolesToRemove.begin();
- it != rolesToRemove.end(); ++it) {
- vector<RoleName>::iterator itToRm = std::find(roles.begin(), roles.end(), *it);
- if (itToRm != roles.end()) {
- roles.erase(itToRm);
- }
- }
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ return auth::checkAuthForDropAllRolesFromDatabaseCommand(client, dbname);
+ }
- audit::logRevokeRolesFromRole(ClientBasic::getCurrent(),
- roleName,
- rolesToRemove);
-
- status = updateRoleDocument(
- txn,
- roleName,
- BSON("$set" << BSON("roles" << rolesVectorToBSONArray(roles))),
- writeConcern);
- // Must invalidate even on bad status - what if the write succeeded but the GLE failed?
- authzManager->invalidateUserCache();
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ BSONObj writeConcern;
+ Status status = auth::parseDropAllRolesFromDatabaseCommand(cmdObj, dbname, &writeConcern);
+ if (!status.isOK()) {
return appendCommandStatus(result, status);
}
- } cmdRevokeRolesFromRole;
-
- class CmdDropRole: public Command {
- public:
-
- CmdDropRole() : Command("dropRole") {}
-
- virtual bool slaveOk() const {
- return false;
+ ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ stdx::unique_lock<stdx::timed_mutex> lk(getAuthzDataMutex(serviceContext),
+ authzDataMutexAcquisitionTimeout);
+ if (!lk) {
+ return appendCommandStatus(
+ result, Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
}
- virtual bool isWriteCommandForConfigServer() const { return true; }
-
- virtual void help(stringstream& ss) const {
- ss << "Drops a single role. Before deleting the role completely it must remove it "
- "from any users or roles that reference it. If any errors occur in the middle "
- "of that process it's possible to be left in a state where the role has been "
- "removed from some user/roles but otherwise still exists."<< endl;
+ AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
+ status = requireAuthSchemaVersion26Final(txn, authzManager);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
}
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- return auth::checkAuthForDropRoleCommand(client, dbname, cmdObj);
+ // Remove these roles from all users
+ int nMatched;
+ status = updateAuthzDocuments(
+ txn,
+ AuthorizationManager::usersCollectionNamespace,
+ BSON("roles" << BSON(AuthorizationManager::ROLE_DB_FIELD_NAME << dbname)),
+ BSON("$pull" << BSON("roles"
+ << BSON(AuthorizationManager::ROLE_DB_FIELD_NAME << dbname))),
+ false,
+ true,
+ writeConcern,
+ &nMatched);
+ // Must invalidate even on bad status - what if the write succeeded but the GLE failed?
+ authzManager->invalidateUserCache();
+ if (!status.isOK()) {
+ ErrorCodes::Error code = status.code() == ErrorCodes::UnknownError
+ ? ErrorCodes::UserModificationFailed
+ : status.code();
+ return appendCommandStatus(result,
+ Status(code,
+ str::stream()
+ << "Failed to remove roles from \"" << dbname
+ << "\" db from all users: " << status.reason()));
+ }
+
+ // Remove these roles from all other roles
+ std::string sourceFieldName = str::stream() << "roles."
+ << AuthorizationManager::ROLE_DB_FIELD_NAME;
+ status = updateAuthzDocuments(
+ txn,
+ AuthorizationManager::rolesCollectionNamespace,
+ BSON(sourceFieldName << dbname),
+ BSON("$pull" << BSON("roles"
+ << BSON(AuthorizationManager::ROLE_DB_FIELD_NAME << dbname))),
+ false,
+ true,
+ writeConcern,
+ &nMatched);
+ // Must invalidate even on bad status - what if the write succeeded but the GLE failed?
+ authzManager->invalidateUserCache();
+ if (!status.isOK()) {
+ ErrorCodes::Error code = status.code() == ErrorCodes::UnknownError
+ ? ErrorCodes::RoleModificationFailed
+ : status.code();
+ return appendCommandStatus(result,
+ Status(code,
+ str::stream()
+ << "Failed to remove roles from \"" << dbname
+ << "\" db from all roles: " << status.reason()));
+ }
+
+ audit::logDropAllRolesFromDatabase(ClientBasic::getCurrent(), dbname);
+ // Finally, remove the actual role documents
+ status = removeRoleDocuments(
+ txn, BSON(AuthorizationManager::ROLE_DB_FIELD_NAME << dbname), writeConcern, &nMatched);
+ // Must invalidate even on bad status - what if the write succeeded but the GLE failed?
+ authzManager->invalidateUserCache();
+ if (!status.isOK()) {
+ return appendCommandStatus(
+ result,
+ Status(status.code(),
+ str::stream() << "Removed roles from \"" << dbname
+ << "\" db "
+ " from all users and roles but failed to actually delete"
+ " those roles themselves: " << status.reason()));
}
- bool run(OperationContext* txn, const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
- stdx::unique_lock<stdx::timed_mutex> lk(getAuthzDataMutex(serviceContext),
- authzDataMutexAcquisitionTimeout);
- if (!lk) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
- }
+ result.append("n", nMatched);
- AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- Status status = requireAuthSchemaVersion26Final(txn, authzManager);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
-
- RoleName roleName;
- BSONObj writeConcern;
- status = auth::parseDropRoleCommand(cmdObj,
- dbname,
- &roleName,
- &writeConcern);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
-
- if (RoleGraph::isBuiltinRole(roleName)) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::InvalidRoleModification,
- str::stream() << roleName.getFullName() <<
- " is a built-in role and cannot be modified."));
- }
-
- BSONObj roleDoc;
- status = authzManager->getRoleDescription(roleName, false, &roleDoc);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
-
- // Remove this role from all users
- int nMatched;
- status = updateAuthzDocuments(
- txn,
- AuthorizationManager::usersCollectionNamespace,
- BSON("roles" << BSON("$elemMatch" <<
- BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME <<
- roleName.getRole() <<
- AuthorizationManager::ROLE_DB_FIELD_NAME <<
- roleName.getDB()))),
- BSON("$pull" << BSON("roles" <<
- BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME <<
- roleName.getRole() <<
- AuthorizationManager::ROLE_DB_FIELD_NAME <<
- roleName.getDB()))),
- false,
- true,
- writeConcern,
- &nMatched);
- // Must invalidate even on bad status - what if the write succeeded but the GLE failed?
- authzManager->invalidateUserCache();
- if (!status.isOK()) {
- ErrorCodes::Error code = status.code() == ErrorCodes::UnknownError ?
- ErrorCodes::UserModificationFailed : status.code();
- return appendCommandStatus(
- result,
- Status(code,
- str::stream() << "Failed to remove role " << roleName.getFullName()
- << " from all users: " << status.reason()));
- }
-
- // Remove this role from all other roles
- status = updateAuthzDocuments(
- txn,
- AuthorizationManager::rolesCollectionNamespace,
- BSON("roles" << BSON("$elemMatch" <<
- BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME <<
- roleName.getRole() <<
- AuthorizationManager::ROLE_DB_FIELD_NAME <<
- roleName.getDB()))),
- BSON("$pull" << BSON("roles" <<
- BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME <<
- roleName.getRole() <<
- AuthorizationManager::ROLE_DB_FIELD_NAME <<
- roleName.getDB()))),
- false,
- true,
- writeConcern,
- &nMatched);
- // Must invalidate even on bad status - what if the write succeeded but the GLE failed?
- authzManager->invalidateUserCache();
- if (!status.isOK()) {
- ErrorCodes::Error code = status.code() == ErrorCodes::UnknownError ?
- ErrorCodes::RoleModificationFailed : status.code();
- return appendCommandStatus(
- result,
- Status(code,
- str::stream() << "Removed role " << roleName.getFullName() <<
- " from all users but failed to remove from all roles: " <<
- status.reason()));
- }
-
- audit::logDropRole(ClientBasic::getCurrent(),
- roleName);
- // Finally, remove the actual role document
- status = removeRoleDocuments(
- txn,
- BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME << roleName.getRole() <<
- AuthorizationManager::ROLE_DB_FIELD_NAME << roleName.getDB()),
- writeConcern,
- &nMatched);
- // Must invalidate even on bad status - what if the write succeeded but the GLE failed?
- authzManager->invalidateUserCache();
- if (!status.isOK()) {
- return appendCommandStatus(
- result,
- Status(status.code(),
- str::stream() << "Removed role " << roleName.getFullName() <<
- " from all users and roles but failed to actually delete"
- " the role itself: " << status.reason()));
- }
+ return true;
+ }
- dassert(nMatched == 0 || nMatched == 1);
- if (nMatched == 0) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::RoleNotFound,
- str::stream() << "Role '" << roleName.getFullName() <<
- "' not found"));
- }
+} cmdDropAllRolesFromDatabase;
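
The role-to-role cleanup in run() above switches from $elemMatch to a dotted-path
match, since only the db half of each embedded reference is constrained; a sketch,
again assuming ROLE_DB_FIELD_NAME expands to "db":

    // Sketch under the same field-name assumption: one multi-update strips every
    // reference into dbname out of every role document's "roles" array.
    BSONObj filter = BSON("roles.db" << dbname);  // dotted path reaches into array elements
    BSONObj update = BSON("$pull" << BSON("roles" << BSON("db" << dbname)));
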
- return true;
- }
+class CmdRolesInfo : public Command {
+public:
+ virtual bool slaveOk() const {
+ return false;
+ }
- } cmdDropRole;
+ virtual bool slaveOverrideOk() const {
+ return true;
+ }
- class CmdDropAllRolesFromDatabase: public Command {
- public:
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
- CmdDropAllRolesFromDatabase() : Command("dropAllRolesFromDatabase") {}
+ CmdRolesInfo() : Command("rolesInfo") {}
- virtual bool slaveOk() const {
- return false;
- }
+ virtual void help(stringstream& ss) const {
+ ss << "Returns information about roles." << endl;
+ }
- virtual bool isWriteCommandForConfigServer() const { return true; }
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ return auth::checkAuthForRolesInfoCommand(client, dbname, cmdObj);
+ }
- virtual void help(stringstream& ss) const {
- ss << "Drops all roles from the given database. Before deleting the roles completely "
- "it must remove them from any users or other roles that reference them. If any "
- "errors occur in the middle of that process it's possible to be left in a state "
- "where the roles have been removed from some user/roles but otherwise still "
- "exist." << endl;
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ auth::RolesInfoArgs args;
+ Status status = auth::parseRolesInfoCommand(cmdObj, dbname, &args);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
}
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- return auth::checkAuthForDropAllRolesFromDatabaseCommand(client, dbname);
+ status = requireAuthSchemaVersion26UpgradeOrFinal(txn, getGlobalAuthorizationManager());
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
}
- bool run(OperationContext* txn, const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
- BSONObj writeConcern;
- Status status = auth::parseDropAllRolesFromDatabaseCommand(cmdObj,
- dbname,
- &writeConcern);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
-
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
- stdx::unique_lock<stdx::timed_mutex> lk(getAuthzDataMutex(serviceContext),
- authzDataMutexAcquisitionTimeout);
- if (!lk) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
- }
-
- AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- status = requireAuthSchemaVersion26Final(txn, authzManager);
+ BSONArrayBuilder rolesArrayBuilder;
+ if (args.allForDB) {
+ std::vector<BSONObj> rolesDocs;
+ status = getGlobalAuthorizationManager()->getRoleDescriptionsForDB(
+ dbname, args.showPrivileges, args.showBuiltinRoles, &rolesDocs);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
- // Remove these roles from all users
- int nMatched;
- status = updateAuthzDocuments(
- txn,
- AuthorizationManager::usersCollectionNamespace,
- BSON("roles" << BSON(AuthorizationManager::ROLE_DB_FIELD_NAME << dbname)),
- BSON("$pull" << BSON("roles" <<
- BSON(AuthorizationManager::ROLE_DB_FIELD_NAME <<
- dbname))),
- false,
- true,
- writeConcern,
- &nMatched);
- // Must invalidate even on bad status - what if the write succeeded but the GLE failed?
- authzManager->invalidateUserCache();
- if (!status.isOK()) {
- ErrorCodes::Error code = status.code() == ErrorCodes::UnknownError ?
- ErrorCodes::UserModificationFailed : status.code();
- return appendCommandStatus(
- result,
- Status(code,
- str::stream() << "Failed to remove roles from \"" << dbname
- << "\" db from all users: " << status.reason()));
+ for (size_t i = 0; i < rolesDocs.size(); ++i) {
+ rolesArrayBuilder.append(rolesDocs[i]);
}
-
- // Remove these roles from all other roles
- std::string sourceFieldName =
- str::stream() << "roles." << AuthorizationManager::ROLE_DB_FIELD_NAME;
- status = updateAuthzDocuments(
- txn,
- AuthorizationManager::rolesCollectionNamespace,
- BSON(sourceFieldName << dbname),
- BSON("$pull" << BSON("roles" <<
- BSON(AuthorizationManager::ROLE_DB_FIELD_NAME <<
- dbname))),
- false,
- true,
- writeConcern,
- &nMatched);
- // Must invalidate even on bad status - what if the write succeeded but the GLE failed?
- authzManager->invalidateUserCache();
- if (!status.isOK()) {
- ErrorCodes::Error code = status.code() == ErrorCodes::UnknownError ?
- ErrorCodes::RoleModificationFailed : status.code();
- return appendCommandStatus(
- result,
- Status(code,
- str::stream() << "Failed to remove roles from \"" << dbname
- << "\" db from all roles: " << status.reason()));
- }
-
- audit::logDropAllRolesFromDatabase(ClientBasic::getCurrent(), dbname);
- // Finally, remove the actual role documents
- status = removeRoleDocuments(
- txn,
- BSON(AuthorizationManager::ROLE_DB_FIELD_NAME << dbname),
- writeConcern,
- &nMatched);
- // Must invalidate even on bad status - what if the write succeeded but the GLE failed?
- authzManager->invalidateUserCache();
- if (!status.isOK()) {
- return appendCommandStatus(
- result,
- Status(status.code(),
- str::stream() << "Removed roles from \"" << dbname << "\" db "
- " from all users and roles but failed to actually delete"
- " those roles themselves: " << status.reason()));
+ } else {
+ for (size_t i = 0; i < args.roleNames.size(); ++i) {
+ BSONObj roleDetails;
+ status = getGlobalAuthorizationManager()->getRoleDescription(
+ args.roleNames[i], args.showPrivileges, &roleDetails);
+ if (status.code() == ErrorCodes::RoleNotFound) {
+ continue;
+ }
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
+ rolesArrayBuilder.append(roleDetails);
}
-
- result.append("n", nMatched);
-
- return true;
}
+ result.append("roles", rolesArrayBuilder.arr());
+ return true;
+ }
- } cmdDropAllRolesFromDatabase;
+} cmdRolesInfo;
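
The two branches of run() above (args.allForDB vs. args.roleNames) correspond to the
two invocation shapes of rolesInfo; a hedged sketch of both, with field names taken
from the standard rolesInfo interface rather than from this hunk:

    // Sketches only; auth::parseRolesInfoCommand owns the actual field names.
    BSONObj allForDb = BSON("rolesInfo" << 1 << "showPrivileges" << true
                                        << "showBuiltinRoles" << true);  // args.allForDB branch
    BSONObj oneRole = BSON("rolesInfo" << BSON("role" << "readWriteX"
                                               << "db" << "test"));      // args.roleNames branch
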
- class CmdRolesInfo: public Command {
- public:
+class CmdInvalidateUserCache : public Command {
+public:
+ virtual bool slaveOk() const {
+ return true;
+ }
- virtual bool slaveOk() const {
- return false;
- }
+ virtual bool adminOnly() const {
+ return true;
+ }
- virtual bool slaveOverrideOk() const {
- return true;
- }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
- virtual bool isWriteCommandForConfigServer() const { return false; }
+ CmdInvalidateUserCache() : Command("invalidateUserCache") {}
- CmdRolesInfo() : Command("rolesInfo") {}
+ virtual void help(stringstream& ss) const {
+ ss << "Invalidates the in-memory cache of user information" << endl;
+ }
- virtual void help(stringstream& ss) const {
- ss << "Returns information about roles." << endl;
- }
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ return auth::checkAuthForInvalidateUserCacheCommand(client);
+ }
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- return auth::checkAuthForRolesInfoCommand(client, dbname, cmdObj);
- }
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ AuthorizationManager* authzManager = getGlobalAuthorizationManager();
+ authzManager->invalidateUserCache();
+ return true;
+ }
- bool run(OperationContext* txn, const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
+} cmdInvalidateUserCache;
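
Since run() above takes no arguments beyond the command name, invoking it is a
one-liner; a sketch using the C++ client's runCommand (connection setup and error
handling elided, names illustrative):

    // Sketch: flush the user cache on a remote node (adminOnly(), so the "admin" db).
    DBClientConnection conn;  // assume already connected and authenticated
    BSONObj res;
    conn.runCommand("admin", BSON("invalidateUserCache" << 1), res);
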
- auth::RolesInfoArgs args;
- Status status = auth::parseRolesInfoCommand(cmdObj, dbname, &args);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+class CmdGetCacheGeneration : public Command {
+public:
+ virtual bool slaveOk() const {
+ return true;
+ }
- status = requireAuthSchemaVersion26UpgradeOrFinal(txn,
- getGlobalAuthorizationManager());
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+ virtual bool adminOnly() const {
+ return true;
+ }
- BSONArrayBuilder rolesArrayBuilder;
- if (args.allForDB) {
- std::vector<BSONObj> rolesDocs;
- status = getGlobalAuthorizationManager()->getRoleDescriptionsForDB(
- dbname, args.showPrivileges, args.showBuiltinRoles, &rolesDocs);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
- for (size_t i = 0; i < rolesDocs.size(); ++i) {
- rolesArrayBuilder.append(rolesDocs[i]);
- }
- } else {
- for (size_t i = 0; i < args.roleNames.size(); ++i) {
- BSONObj roleDetails;
- status = getGlobalAuthorizationManager()->getRoleDescription(
- args.roleNames[i], args.showPrivileges, &roleDetails);
- if (status.code() == ErrorCodes::RoleNotFound) {
- continue;
- }
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
- rolesArrayBuilder.append(roleDetails);
- }
- }
- result.append("roles", rolesArrayBuilder.arr());
- return true;
- }
+ CmdGetCacheGeneration() : Command("_getUserCacheGeneration") {}
- } cmdRolesInfo;
+ virtual void help(stringstream& ss) const {
+ ss << "internal" << endl;
+ }
- class CmdInvalidateUserCache: public Command {
- public:
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ return auth::checkAuthForGetUserCacheGenerationCommand(client);
+ }
- virtual bool slaveOk() const {
- return true;
- }
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ AuthorizationManager* authzManager = getGlobalAuthorizationManager();
+ result.append("cacheGeneration", authzManager->getCacheGeneration());
+ return true;
+ }
- virtual bool adminOnly() const {
- return true;
- }
+} cmdGetCacheGeneration;
- virtual bool isWriteCommandForConfigServer() const { return false; }
+/**
+ * This command is used only by mongorestore to handle restoring users/roles. It exists so
+ * that mongorestore doesn't insert directly into admin.system.users and admin.system.roles,
+ * which would bypass the authzUpdateLock and allow multiple concurrent modifications to
+ * users/roles. Instead, mongorestore inserts all user/role definitions it wants to restore
+ * into temporary collections, and this command then moves those definitions into their
+ * proper place in admin.system.users and admin.system.roles. It either merges the restored
+ * users/roles into the existing ones (when the "drop" argument is false) or replaces the
+ * existing ones entirely (when "drop" is true).
+ */
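
A hedged sketch of the command document mongorestore would send, per the description
above and the tempUsersCollection/tempRolesCollection field names checked in run()
below (the temp collection names here are illustrative):

    BSONObj cmd = BSON("_mergeAuthzCollections" << 1
                       << "tempUsersCollection" << "admin.tempusers"  // hypothetical temp ns
                       << "tempRolesCollection" << "admin.temproles"  // hypothetical temp ns
                       << "drop" << true);  // true: replace existing users/roles; false: merge
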
+class CmdMergeAuthzCollections : public Command {
+public:
+ CmdMergeAuthzCollections() : Command("_mergeAuthzCollections") {}
+
+ virtual bool slaveOk() const {
+ return false;
+ }
- CmdInvalidateUserCache() : Command("invalidateUserCache") {}
+ virtual bool isWriteCommandForConfigServer() const {
+ return true;
+ }
- virtual void help(stringstream& ss) const {
- ss << "Invalidates the in-memory cache of user information" << endl;
- }
+ virtual bool adminOnly() const {
+ return true;
+ }
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- return auth::checkAuthForInvalidateUserCacheCommand(client);
- }
+ virtual void help(stringstream& ss) const {
+ ss << "Internal command used by mongorestore for updating user/role data" << endl;
+ }
- bool run(OperationContext* txn, const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ return auth::checkAuthForMergeAuthzCollectionsCommand(client, cmdObj);
+ }
- AuthorizationManager* authzManager = getGlobalAuthorizationManager();
- authzManager->invalidateUserCache();
- return true;
- }
+ static UserName extractUserNameFromBSON(const BSONObj& userObj) {
+ std::string name;
+ std::string db;
+ Status status =
+ bsonExtractStringField(userObj, AuthorizationManager::USER_NAME_FIELD_NAME, &name);
+ uassertStatusOK(status);
+ status = bsonExtractStringField(userObj, AuthorizationManager::USER_DB_FIELD_NAME, &db);
+ uassertStatusOK(status);
+ return UserName(name, db);
+ }
- } cmdInvalidateUserCache;
+ /**
+ * Extracts the UserName from the user document and adds it to the set of existing users.
+ * This function is written so it can be used with stdx::bind over the result set of a query
+ * on admin.system.users to add the user names of all existing users to the "usersToDrop"
+ * set used in the command body.
+ */
+ static void extractAndInsertUserName(unordered_set<UserName>* existingUsers,
+ const BSONObj& userObj) {
+ UserName userName = extractUserNameFromBSON(userObj);
+ existingUsers->insert(userName);
+ }
- class CmdGetCacheGeneration: public Command {
- public:
+ static RoleName extractRoleNameFromBSON(const BSONObj& roleObj) {
+ std::string name;
+ std::string db;
+ Status status =
+ bsonExtractStringField(roleObj, AuthorizationManager::ROLE_NAME_FIELD_NAME, &name);
+ uassertStatusOK(status);
+ status = bsonExtractStringField(roleObj, AuthorizationManager::ROLE_DB_FIELD_NAME, &db);
+ uassertStatusOK(status);
+ return RoleName(name, db);
+ }
- virtual bool slaveOk() const {
- return true;
- }
+ /**
+ * Extracts the RoleName from the role document and adds it to the set of existing roles.
+ * This function is written so it can be used with stdx::bind over the result set of a query
+ * on admin.system.roles to add the role names of all existing roles to the "rolesToDrop"
+ * set used in the command body.
+ */
+ static void extractAndInsertRoleName(unordered_set<RoleName>* existingRoles,
+ const BSONObj& roleObj) {
+ RoleName roleName = extractRoleNameFromBSON(roleObj);
+ existingRoles->insert(roleName);
+ }
- virtual bool adminOnly() const {
- return true;
+ /**
+ * Audits the fact that we are creating or updating the user described by userObj.
+ */
+ static void auditCreateOrUpdateUser(const BSONObj& userObj, bool create) {
+ UserName userName = extractUserNameFromBSON(userObj);
+ std::vector<RoleName> roles;
+ uassertStatusOK(auth::parseRoleNamesFromBSONArray(
+ BSONArray(userObj["roles"].Obj()), userName.getDB(), &roles));
+ BSONObj customData;
+ if (userObj.hasField("customData")) {
+ customData = userObj["customData"].Obj();
}
- virtual bool isWriteCommandForConfigServer() const { return false; }
-
- CmdGetCacheGeneration() : Command("_getUserCacheGeneration") {}
-
- virtual void help(stringstream& ss) const {
- ss << "internal" << endl;
+ if (create) {
+ audit::logCreateUser(ClientBasic::getCurrent(),
+ userName,
+ userObj["credentials"].Obj().hasField("MONGODB-CR"),
+ userObj.hasField("customData") ? &customData : NULL,
+ roles);
+ } else {
+ audit::logUpdateUser(ClientBasic::getCurrent(),
+ userName,
+ userObj["credentials"].Obj().hasField("MONGODB-CR"),
+ userObj.hasField("customData") ? &customData : NULL,
+ &roles);
}
+ }
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- return auth::checkAuthForGetUserCacheGenerationCommand(client);
+ /**
+ * Audits the fact that we are creating or updating the role described by roleObj.
+ */
+ static void auditCreateOrUpdateRole(const BSONObj& roleObj, bool create) {
+ RoleName roleName = extractRoleNameFromBSON(roleObj);
+ std::vector<RoleName> roles;
+ std::vector<Privilege> privileges;
+ uassertStatusOK(auth::parseRoleNamesFromBSONArray(
+ BSONArray(roleObj["roles"].Obj()), roleName.getDB(), &roles));
+ uassertStatusOK(auth::parseAndValidatePrivilegeArray(BSONArray(roleObj["privileges"].Obj()),
+ &privileges));
+ if (create) {
+ audit::logCreateRole(ClientBasic::getCurrent(), roleName, roles, privileges);
+ } else {
+ audit::logUpdateRole(ClientBasic::getCurrent(), roleName, &roles, &privileges);
}
+ }
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
-
- AuthorizationManager* authzManager = getGlobalAuthorizationManager();
- result.append("cacheGeneration", authzManager->getCacheGeneration());
- return true;
+ /**
+ * Designed to be used with stdx::bind to be called on every user object in the result
+ * set of a query over the tempUsersCollection provided to the command. For each user
+ * in the temp collection that is defined on the given db, adds that user to the actual
+ * admin.system.users collection.
+ * Also removes any users it encounters from the usersToDrop set.
+ */
+ static void addUser(OperationContext* txn,
+ AuthorizationManager* authzManager,
+ StringData db,
+ bool update,
+ const BSONObj& writeConcern,
+ unordered_set<UserName>* usersToDrop,
+ const BSONObj& userObj) {
+ UserName userName = extractUserNameFromBSON(userObj);
+ if (!db.empty() && userName.getDB() != db) {
+ return;
}
- } CmdGetCacheGeneration;
+ if (update && usersToDrop->count(userName)) {
+ auditCreateOrUpdateUser(userObj, false);
+ Status status = updatePrivilegeDocument(txn, userName, userObj, writeConcern);
+ if (!status.isOK()) {
+ // Match the behavior of mongorestore to continue on failure
+ warning() << "Could not update user " << userName
+ << " in _mergeAuthzCollections command: " << status << endl;
+ }
+ } else {
+ auditCreateOrUpdateUser(userObj, true);
+ Status status = insertPrivilegeDocument(txn, userObj, writeConcern);
+ if (!status.isOK()) {
+ // Match the behavior of mongorestore to continue on failure
+ warning() << "Could not insert user " << userName
+ << " in _mergeAuthzCollections command: " << status << endl;
+ }
+ }
+ usersToDrop->erase(userName);
+ }
/**
- * This command is used only by mongorestore to handle restoring users/roles. We do this so
- * that mongorestore doesn't do direct inserts into the admin.system.users and
- * admin.system.roles, which would bypass the authzUpdateLock and allow multiple concurrent
- * modifications to users/roles. What mongorestore now does instead is it inserts all user/role
- * definitions it wants to restore into temporary collections, then this command moves those
- * user/role definitions into their proper place in admin.system.users and admin.system.roles.
- * It either adds the users/roles to the existing ones or replaces the existing ones, depending
- * on whether the "drop" argument is true or false.
+ * Designed to be used with stdx::bind to be called on every role object in the result
+ * set of a query over the tempRolesCollection provided to the command. For each role
+ * in the temp collection that is defined on the given db, adds that role to the actual
+ * admin.system.roles collection.
+ * Also removes any roles it encounters from the rolesToDrop set.
*/
- class CmdMergeAuthzCollections : public Command {
- public:
-
- CmdMergeAuthzCollections() : Command("_mergeAuthzCollections") {}
-
- virtual bool slaveOk() const {
- return false;
- }
-
- virtual bool isWriteCommandForConfigServer() const { return true; }
-
- virtual bool adminOnly() const {
- return true;
- }
-
- virtual void help(stringstream& ss) const {
- ss << "Internal command used by mongorestore for updating user/role data" << endl;
- }
-
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- return auth::checkAuthForMergeAuthzCollectionsCommand(client, cmdObj);
- }
-
- static UserName extractUserNameFromBSON(const BSONObj& userObj) {
- std::string name;
- std::string db;
- Status status = bsonExtractStringField(userObj,
- AuthorizationManager::USER_NAME_FIELD_NAME,
- &name);
- uassertStatusOK(status);
- status = bsonExtractStringField(userObj,
- AuthorizationManager::USER_DB_FIELD_NAME,
- &db);
- uassertStatusOK(status);
- return UserName(name, db);
- }
-
- /**
- * Extracts the UserName from the user document and adds it to set of existing users.
- * This function is written so it can used with stdx::bind over the result set of a query
- * on admin.system.users to add the user names of all existing users to the "usersToDrop"
- * set used in the command body.
- */
- static void extractAndInsertUserName(unordered_set<UserName>* existingUsers,
- const BSONObj& userObj) {
-
- UserName userName = extractUserNameFromBSON(userObj);
- existingUsers->insert(userName);
- }
-
- static RoleName extractRoleNameFromBSON(const BSONObj& roleObj) {
- std::string name;
- std::string db;
- Status status = bsonExtractStringField(roleObj,
- AuthorizationManager::ROLE_NAME_FIELD_NAME,
- &name);
- uassertStatusOK(status);
- status = bsonExtractStringField(roleObj,
- AuthorizationManager::ROLE_DB_FIELD_NAME,
- &db);
- uassertStatusOK(status);
- return RoleName(name, db);
- }
-
- /**
- * Extracts the RoleName from the role document and adds it to set of existing roles.
- * This function is written so it can used with stdx::bind over the result set of a query
- * on admin.system.roles to add the role names of all existing roles to the "rolesToDrop"
- * set used in the command body.
- */
- static void extractAndInsertRoleName(unordered_set<RoleName>* existingRoles,
- const BSONObj& roleObj) {
- RoleName roleName = extractRoleNameFromBSON(roleObj);
- existingRoles->insert(roleName);
- }
-
- /**
- * Audits the fact that we are creating or updating the user described by userObj.
- */
- static void auditCreateOrUpdateUser(const BSONObj& userObj, bool create) {
- UserName userName = extractUserNameFromBSON(userObj);
- std::vector<RoleName> roles;
- uassertStatusOK(auth::parseRoleNamesFromBSONArray(BSONArray(userObj["roles"].Obj()),
- userName.getDB(),
- &roles));
- BSONObj customData;
- if (userObj.hasField("customData")) {
- customData = userObj["customData"].Obj();
- }
-
- if (create) {
- audit::logCreateUser(ClientBasic::getCurrent(),
- userName,
- userObj["credentials"].Obj().hasField("MONGODB-CR"),
- userObj.hasField("customData") ? &customData : NULL,
- roles);
- } else {
- audit::logUpdateUser(ClientBasic::getCurrent(),
- userName,
- userObj["credentials"].Obj().hasField("MONGODB-CR"),
- userObj.hasField("customData") ? &customData : NULL,
- &roles);
-
- }
+ static void addRole(OperationContext* txn,
+ AuthorizationManager* authzManager,
+ StringData db,
+ bool update,
+ const BSONObj& writeConcern,
+ unordered_set<RoleName>* rolesToDrop,
+ const BSONObj& roleObj) {
+ RoleName roleName = extractRoleNameFromBSON(roleObj);
+ if (!db.empty() && roleName.getDB() != db) {
+ return;
}
- /**
- * Audits the fact that we are creating or updating the role described by roleObj.
- */
- static void auditCreateOrUpdateRole(const BSONObj& roleObj, bool create) {
- RoleName roleName = extractRoleNameFromBSON(roleObj);
- std::vector<RoleName> roles;
- std::vector<Privilege> privileges;
- uassertStatusOK(auth::parseRoleNamesFromBSONArray(BSONArray(roleObj["roles"].Obj()),
- roleName.getDB(),
- &roles));
- uassertStatusOK(auth::parseAndValidatePrivilegeArray(
- BSONArray(roleObj["privileges"].Obj()), &privileges));
- if (create) {
- audit::logCreateRole(ClientBasic::getCurrent(), roleName, roles, privileges);
- } else {
- audit::logUpdateRole(ClientBasic::getCurrent(), roleName, &roles, &privileges);
+ if (update && rolesToDrop->count(roleName)) {
+ auditCreateOrUpdateRole(roleObj, false);
+ Status status = updateRoleDocument(txn, roleName, roleObj, writeConcern);
+ if (!status.isOK()) {
+ // Match the behavior of mongorestore to continue on failure
+ warning() << "Could not update role " << roleName
+ << " in _mergeAuthzCollections command: " << status << endl;
+ }
+ } else {
+ auditCreateOrUpdateRole(roleObj, true);
+ Status status = insertRoleDocument(txn, roleObj, writeConcern);
+ if (!status.isOK()) {
+ // Match the behavior of mongorestore to continue on failure
+ warning() << "Could not insert role " << roleName
+ << " in _mergeAuthzCollections command: " << status << endl;
}
}
+ rolesToDrop->erase(roleName);
+ }
- /**
- * Designed to be used with stdx::bind to be called on every user object in the result
- * set of a query over the tempUsersCollection provided to the command. For each user
- * in the temp collection that is defined on the given db, adds that user to the actual
- * admin.system.users collection.
- * Also removes any users it encounters from the usersToDrop set.
- */
- static void addUser(OperationContext* txn,
- AuthorizationManager* authzManager,
- StringData db,
- bool update,
- const BSONObj& writeConcern,
- unordered_set<UserName>* usersToDrop,
- const BSONObj& userObj) {
- UserName userName = extractUserNameFromBSON(userObj);
- if (!db.empty() && userName.getDB() != db) {
- return;
+ /**
+ * Moves all user objects from usersCollName into admin.system.users. If drop is true,
+ * removes any users that were in admin.system.users but not in usersCollName.
+ */
+ Status processUsers(OperationContext* txn,
+ AuthorizationManager* authzManager,
+ StringData usersCollName,
+ StringData db,
+ bool drop,
+ const BSONObj& writeConcern) {
+ // When the "drop" argument has been provided, we use this set to store the users
+ // that are currently in the system, and remove from it as we encounter
+ // same-named users in the collection we are restoring from. Once we've fully
+ // moved over the temp users collection into its final location, we drop
+ // any users that previously existed there but weren't in the temp collection.
+ // This is so that we can completely replace the system.users
+ // collection with the users from the temp collection, without removing all
+ // users at the beginning and thus potentially locking ourselves out by having
+ // no users in the whole system for a time.
+ unordered_set<UserName> usersToDrop;
+
+ if (drop) {
+ // Create map of the users currently in the DB
+ BSONObj query =
+ db.empty() ? BSONObj() : BSON(AuthorizationManager::USER_DB_FIELD_NAME << db);
+ BSONObj fields = BSON(AuthorizationManager::USER_NAME_FIELD_NAME
+ << 1 << AuthorizationManager::USER_DB_FIELD_NAME << 1);
+
+ Status status =
+ queryAuthzDocument(txn,
+ AuthorizationManager::usersCollectionNamespace,
+ query,
+ fields,
+ stdx::bind(&CmdMergeAuthzCollections::extractAndInsertUserName,
+ &usersToDrop,
+ stdx::placeholders::_1));
+ if (!status.isOK()) {
+ return status;
}
+ }
- if (update && usersToDrop->count(userName)) {
- auditCreateOrUpdateUser(userObj, false);
- Status status = updatePrivilegeDocument(txn,
- userName,
- userObj,
- writeConcern);
- if (!status.isOK()) {
- // Match the behavior of mongorestore to continue on failure
- warning() << "Could not update user " << userName <<
- " in _mergeAuthzCollections command: " << status << endl;
- }
- } else {
- auditCreateOrUpdateUser(userObj, true);
- Status status = insertPrivilegeDocument(txn,
- userObj,
- writeConcern);
- if (!status.isOK()) {
- // Match the behavior of mongorestore to continue on failure
- warning() << "Could not insert user " << userName <<
- " in _mergeAuthzCollections command: " << status << endl;
- }
- }
- usersToDrop->erase(userName);
- }
-
- /**
- * Designed to be used with stdx::bind to be called on every role object in the result
- * set of a query over the tempRolesCollection provided to the command. For each role
- * in the temp collection that is defined on the given db, adds that role to the actual
- * admin.system.roles collection.
- * Also removes any roles it encounters from the rolesToDrop set.
- */
- static void addRole(OperationContext* txn,
- AuthorizationManager* authzManager,
- StringData db,
- bool update,
- const BSONObj& writeConcern,
- unordered_set<RoleName>* rolesToDrop,
- const BSONObj roleObj) {
- RoleName roleName = extractRoleNameFromBSON(roleObj);
- if (!db.empty() && roleName.getDB() != db) {
- return;
- }
+ Status status = queryAuthzDocument(
+ txn,
+ NamespaceString(usersCollName),
+ db.empty() ? BSONObj() : BSON(AuthorizationManager::USER_DB_FIELD_NAME << db),
+ BSONObj(),
+ stdx::bind(&CmdMergeAuthzCollections::addUser,
+ txn,
+ authzManager,
+ db,
+ drop,
+ writeConcern,
+ &usersToDrop,
+ stdx::placeholders::_1));
+ if (!status.isOK()) {
+ return status;
+ }
- if (update && rolesToDrop->count(roleName)) {
- auditCreateOrUpdateRole(roleObj, false);
- Status status = updateRoleDocument(txn,
- roleName,
- roleObj,
- writeConcern);
- if (!status.isOK()) {
- // Match the behavior of mongorestore to continue on failure
- warning() << "Could not update role " << roleName <<
- " in _mergeAuthzCollections command: " << status << endl;
- }
- } else {
- auditCreateOrUpdateRole(roleObj, true);
- Status status = insertRoleDocument(txn, roleObj, writeConcern);
- if (!status.isOK()) {
- // Match the behavior of mongorestore to continue on failure
- warning() << "Could not insert role " << roleName <<
- " in _mergeAuthzCollections command: " << status << endl;
- }
- }
- rolesToDrop->erase(roleName);
- }
-
- /**
- * Moves all user objects from usersCollName into admin.system.users. If drop is true,
- * removes any users that were in admin.system.users but not in usersCollName.
- */
- Status processUsers(OperationContext* txn,
- AuthorizationManager* authzManager,
- StringData usersCollName,
- StringData db,
- bool drop,
- const BSONObj& writeConcern) {
- // When the "drop" argument has been provided, we use this set to store the users
- // that are currently in the system, and remove from it as we encounter
- // same-named users in the collection we are restoring from. Once we've fully
- // moved over the temp users collection into its final location, we drop
- // any users that previously existed there but weren't in the temp collection.
- // This is so that we can completely replace the system.users
- // collection with the users from the temp collection, without removing all
- // users at the beginning and thus potentially locking ourselves out by having
- // no users in the whole system for a time.
- unordered_set<UserName> usersToDrop;
-
- if (drop) {
- // Create map of the users currently in the DB
- BSONObj query = db.empty() ?
- BSONObj() : BSON(AuthorizationManager::USER_DB_FIELD_NAME << db);
- BSONObj fields = BSON(AuthorizationManager::USER_NAME_FIELD_NAME << 1 <<
- AuthorizationManager::USER_DB_FIELD_NAME << 1);
-
- Status status = queryAuthzDocument(
- txn,
- AuthorizationManager::usersCollectionNamespace,
- query,
- fields,
- stdx::bind(&CmdMergeAuthzCollections::extractAndInsertUserName,
- &usersToDrop,
- stdx::placeholders::_1));
+ if (drop) {
+ int numRemoved;
+ for (unordered_set<UserName>::iterator it = usersToDrop.begin();
+ it != usersToDrop.end();
+ ++it) {
+ const UserName& userName = *it;
+ audit::logDropUser(ClientBasic::getCurrent(), userName);
+ status = removePrivilegeDocuments(txn,
+ BSON(AuthorizationManager::USER_NAME_FIELD_NAME
+ << userName.getUser().toString()
+ << AuthorizationManager::USER_DB_FIELD_NAME
+ << userName.getDB().toString()),
+ writeConcern,
+ &numRemoved);
if (!status.isOK()) {
return status;
}
+ dassert(numRemoved == 1);
}
+ }
- Status status = queryAuthzDocument(
- txn,
- NamespaceString(usersCollName),
- db.empty() ? BSONObj() : BSON(AuthorizationManager::USER_DB_FIELD_NAME << db),
- BSONObj(),
- stdx::bind(&CmdMergeAuthzCollections::addUser,
- txn,
- authzManager,
- db,
- drop,
- writeConcern,
- &usersToDrop,
- stdx::placeholders::_1));
+ return Status::OK();
+ }
+
+ /**
+ * Moves all role objects from rolesCollName into admin.system.roles. If drop is true,
+ * removes any roles that were in admin.system.roles but not in rolesCollName.
+ */
+ Status processRoles(OperationContext* txn,
+ AuthorizationManager* authzManager,
+ StringData rolesCollName,
+ StringData db,
+ bool drop,
+ const BSONObj& writeConcern) {
+ // When the "drop" argument has been provided, we use this set to store the roles
+ // that are currently in the system, and remove from it as we encounter
+ // same-named roles in the collection we are restoring from. Once we've fully
+ // moved over the temp roles collection into its final location, we drop
+ // any roles that previously existed there but weren't in the temp collection.
+ // This is so that we can completely replace the system.roles
+ // collection with the roles from the temp collection, without removing all
+ // roles at the beginning and thus potentially locking ourselves out.
+ unordered_set<RoleName> rolesToDrop;
+
+ if (drop) {
+ // Create map of the roles currently in the DB
+ BSONObj query =
+ db.empty() ? BSONObj() : BSON(AuthorizationManager::ROLE_DB_FIELD_NAME << db);
+ BSONObj fields = BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
+ << 1 << AuthorizationManager::ROLE_DB_FIELD_NAME << 1);
+
+ Status status =
+ queryAuthzDocument(txn,
+ AuthorizationManager::rolesCollectionNamespace,
+ query,
+ fields,
+ stdx::bind(&CmdMergeAuthzCollections::extractAndInsertRoleName,
+ &rolesToDrop,
+ stdx::placeholders::_1));
if (!status.isOK()) {
return status;
}
+ }
- if (drop) {
- int numRemoved;
- for (unordered_set<UserName>::iterator it = usersToDrop.begin();
- it != usersToDrop.end(); ++it) {
- const UserName& userName = *it;
- audit::logDropUser(ClientBasic::getCurrent(), userName);
- status = removePrivilegeDocuments(
- txn,
- BSON(AuthorizationManager::USER_NAME_FIELD_NAME <<
- userName.getUser().toString() <<
- AuthorizationManager::USER_DB_FIELD_NAME <<
- userName.getDB().toString()
- ),
- writeConcern,
- &numRemoved);
- if (!status.isOK()) {
- return status;
- }
- dassert(numRemoved == 1);
- }
- }
-
- return Status::OK();
+ Status status = queryAuthzDocument(
+ txn,
+ NamespaceString(rolesCollName),
+ db.empty() ? BSONObj() : BSON(AuthorizationManager::ROLE_DB_FIELD_NAME << db),
+ BSONObj(),
+ stdx::bind(&CmdMergeAuthzCollections::addRole,
+ txn,
+ authzManager,
+ db,
+ drop,
+ writeConcern,
+ &rolesToDrop,
+ stdx::placeholders::_1));
+ if (!status.isOK()) {
+ return status;
}
- /**
- * Moves all user objects from usersCollName into admin.system.users. If drop is true,
- * removes any users that were in admin.system.users but not in usersCollName.
- */
- Status processRoles(OperationContext* txn,
- AuthorizationManager* authzManager,
- StringData rolesCollName,
- StringData db,
- bool drop,
- const BSONObj& writeConcern) {
- // When the "drop" argument has been provided, we use this set to store the roles
- // that are currently in the system, and remove from it as we encounter
- // same-named roles in the collection we are restoring from. Once we've fully
- // moved over the temp roles collection into its final location, we drop
- // any roles that previously existed there but weren't in the temp collection.
- // This is so that we can completely replace the system.roles
- // collection with the roles from the temp collection, without removing all
- // roles at the beginning and thus potentially locking ourselves out.
- unordered_set<RoleName> rolesToDrop;
-
- if (drop) {
- // Create map of the roles currently in the DB
- BSONObj query = db.empty() ?
- BSONObj() : BSON(AuthorizationManager::ROLE_DB_FIELD_NAME << db);
- BSONObj fields = BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME << 1 <<
- AuthorizationManager::ROLE_DB_FIELD_NAME << 1);
-
- Status status = queryAuthzDocument(
- txn,
- AuthorizationManager::rolesCollectionNamespace,
- query,
- fields,
- stdx::bind(&CmdMergeAuthzCollections::extractAndInsertRoleName,
- &rolesToDrop,
- stdx::placeholders::_1));
+ if (drop) {
+ int numRemoved;
+ for (unordered_set<RoleName>::iterator it = rolesToDrop.begin();
+ it != rolesToDrop.end();
+ ++it) {
+ const RoleName& roleName = *it;
+ audit::logDropRole(ClientBasic::getCurrent(), roleName);
+ status = removeRoleDocuments(txn,
+ BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
+ << roleName.getRole().toString()
+ << AuthorizationManager::ROLE_DB_FIELD_NAME
+ << roleName.getDB().toString()),
+ writeConcern,
+ &numRemoved);
if (!status.isOK()) {
return status;
}
+ dassert(numRemoved == 1);
}
+ }
- Status status = queryAuthzDocument(
- txn,
- NamespaceString(rolesCollName),
- db.empty() ?
- BSONObj() : BSON(AuthorizationManager::ROLE_DB_FIELD_NAME << db),
- BSONObj(),
- stdx::bind(&CmdMergeAuthzCollections::addRole,
- txn,
- authzManager,
- db,
- drop,
- writeConcern,
- &rolesToDrop,
- stdx::placeholders::_1));
- if (!status.isOK()) {
- return status;
- }
+ return Status::OK();
+ }
- if (drop) {
- int numRemoved;
- for (unordered_set<RoleName>::iterator it = rolesToDrop.begin();
- it != rolesToDrop.end(); ++it) {
- const RoleName& roleName = *it;
- audit::logDropRole(ClientBasic::getCurrent(), roleName);
- status = removeRoleDocuments(
- txn,
- BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME <<
- roleName.getRole().toString() <<
- AuthorizationManager::ROLE_DB_FIELD_NAME <<
- roleName.getDB().toString()
- ),
- writeConcern,
- &numRemoved);
- if (!status.isOK()) {
- return status;
- }
- dassert(numRemoved == 1);
- }
- }
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ auth::MergeAuthzCollectionsArgs args;
+ Status status = auth::parseMergeAuthzCollectionsCommand(cmdObj, &args);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
- return Status::OK();
+ if (args.usersCollName.empty() && args.rolesCollName.empty()) {
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::BadValue,
+ "Must provide at least one of \"tempUsersCollection\" and "
+ "\"tempRolescollection\""));
}
- bool run(OperationContext* txn, const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
+ ServiceContext* serviceContext = txn->getClient()->getServiceContext();
+ stdx::unique_lock<stdx::timed_mutex> lk(getAuthzDataMutex(serviceContext),
+ authzDataMutexAcquisitionTimeout);
+ if (!lk) {
+ return appendCommandStatus(
+ result, Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
+ }
- auth::MergeAuthzCollectionsArgs args;
- Status status = auth::parseMergeAuthzCollectionsCommand(cmdObj, &args);
+ AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
+ status = requireAuthSchemaVersion26Final(txn, authzManager);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
+
+ if (!args.usersCollName.empty()) {
+ Status status = processUsers(
+ txn, authzManager, args.usersCollName, args.db, args.drop, args.writeConcern);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
+ }
- if (args.usersCollName.empty() && args.rolesCollName.empty()) {
- return appendCommandStatus(
- result, Status(ErrorCodes::BadValue,
- "Must provide at least one of \"tempUsersCollection\" and "
- "\"tempRolescollection\""));
- }
-
- ServiceContext* serviceContext = txn->getClient()->getServiceContext();
- stdx::unique_lock<stdx::timed_mutex> lk(getAuthzDataMutex(serviceContext),
- authzDataMutexAcquisitionTimeout);
- if (!lk) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."));
- }
-
- AuthorizationManager* authzManager = AuthorizationManager::get(serviceContext);
- status = requireAuthSchemaVersion26Final(txn, authzManager);
+ if (!args.rolesCollName.empty()) {
+ Status status = processRoles(
+ txn, authzManager, args.rolesCollName, args.db, args.drop, args.writeConcern);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
-
- if (!args.usersCollName.empty()) {
- Status status = processUsers(txn,
- authzManager,
- args.usersCollName,
- args.db,
- args.drop,
- args.writeConcern);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
- }
-
- if (!args.rolesCollName.empty()) {
- Status status = processRoles(txn,
- authzManager,
- args.rolesCollName,
- args.db,
- args.drop,
- args.writeConcern);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
- }
-
- return true;
}
- } cmdMergeAuthzCollections;
+ return true;
+ }
+
+} cmdMergeAuthzCollections;
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/commands/user_management_commands.h b/src/mongo/db/commands/user_management_commands.h
index 6b67b8f7891..63c7a7d8d6c 100644
--- a/src/mongo/db/commands/user_management_commands.h
+++ b/src/mongo/db/commands/user_management_commands.h
@@ -38,112 +38,109 @@
namespace mongo {
- class AuthorizationManager;
- class AuthorizationSession;
- struct BSONArray;
- class BSONObj;
- class ClientBasic;
- class OperationContext;
+class AuthorizationManager;
+class AuthorizationSession;
+struct BSONArray;
+class BSONObj;
+class ClientBasic;
+class OperationContext;
namespace auth {
- /**
- * Looks for a field name "pwd" in the given BSONObj and if found replaces its contents with the
- * string "xxx" so that password data on the command object used in executing a user management
- * command isn't exposed in the logs.
- */
- void redactPasswordData(mutablebson::Element parent);
+/**
+ * Looks for fields named "pwd" in the given document and, if found, replaces their contents
+ * with the string "xxx" so that password data on the command object used in executing a user
+ * management command isn't exposed in the logs.
+ */
+void redactPasswordData(mutablebson::Element parent);
- //
- // checkAuthorizedTo* methods
- //
+//
+// checkAuthorizedTo* methods
+//
- Status checkAuthorizedToGrantRoles(AuthorizationSession* authzSession,
- const std::vector<RoleName>& roles);
+Status checkAuthorizedToGrantRoles(AuthorizationSession* authzSession,
+ const std::vector<RoleName>& roles);
- Status checkAuthorizedToGrantPrivileges(AuthorizationSession* authzSession,
- const PrivilegeVector& privileges);
+Status checkAuthorizedToGrantPrivileges(AuthorizationSession* authzSession,
+ const PrivilegeVector& privileges);
- Status checkAuthorizedToRevokeRoles(AuthorizationSession* authzSession,
- const std::vector<RoleName>& roles);
+Status checkAuthorizedToRevokeRoles(AuthorizationSession* authzSession,
+ const std::vector<RoleName>& roles);
- Status checkAuthorizedToRevokePrivileges(AuthorizationSession* authzSession,
- const PrivilegeVector& privileges);
+Status checkAuthorizedToRevokePrivileges(AuthorizationSession* authzSession,
+ const PrivilegeVector& privileges);
- //
- // checkAuthFor*Command methods
- //
+//
+// checkAuthFor*Command methods
+//
- Status checkAuthForCreateUserCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj);
+Status checkAuthForCreateUserCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj);
- Status checkAuthForUpdateUserCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj);
+Status checkAuthForUpdateUserCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj);
- Status checkAuthForGrantRolesToUserCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj);
+Status checkAuthForGrantRolesToUserCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj);
- Status checkAuthForCreateRoleCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj);
+Status checkAuthForCreateRoleCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj);
- Status checkAuthForUpdateRoleCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj);
+Status checkAuthForUpdateRoleCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj);
- Status checkAuthForGrantRolesToRoleCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj);
+Status checkAuthForGrantRolesToRoleCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj);
- Status checkAuthForGrantPrivilegesToRoleCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj);
+Status checkAuthForGrantPrivilegesToRoleCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj);
- Status checkAuthForDropAllUsersFromDatabaseCommand(ClientBasic* client,
- const std::string& dbname);
+Status checkAuthForDropAllUsersFromDatabaseCommand(ClientBasic* client, const std::string& dbname);
- Status checkAuthForRevokeRolesFromUserCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj);
+Status checkAuthForRevokeRolesFromUserCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj);
- Status checkAuthForRevokeRolesFromRoleCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj);
+Status checkAuthForRevokeRolesFromRoleCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj);
- Status checkAuthForDropUserCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj);
+Status checkAuthForDropUserCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj);
- Status checkAuthForDropRoleCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj);
+Status checkAuthForDropRoleCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj);
- Status checkAuthForUsersInfoCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj);
+Status checkAuthForUsersInfoCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj);
- Status checkAuthForRevokePrivilegesFromRoleCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj);
+Status checkAuthForRevokePrivilegesFromRoleCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj);
- Status checkAuthForDropAllRolesFromDatabaseCommand(ClientBasic* client,
- const std::string& dbname);
+Status checkAuthForDropAllRolesFromDatabaseCommand(ClientBasic* client, const std::string& dbname);
- Status checkAuthForRolesInfoCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj);
+Status checkAuthForRolesInfoCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj);
- Status checkAuthForInvalidateUserCacheCommand(ClientBasic* client);
+Status checkAuthForInvalidateUserCacheCommand(ClientBasic* client);
- Status checkAuthForGetUserCacheGenerationCommand(ClientBasic* client);
+Status checkAuthForGetUserCacheGenerationCommand(ClientBasic* client);
- Status checkAuthForMergeAuthzCollectionsCommand(ClientBasic* client,
- const BSONObj& cmdObj);
+Status checkAuthForMergeAuthzCollectionsCommand(ClientBasic* client, const BSONObj& cmdObj);
-} // namespace auth
-} // namespace mongo
+} // namespace auth
+} // namespace mongo
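
Note: the checkAuth* declarations above all share one shape -- parse the command object into typed arguments, then interrogate the AuthorizationSession and fail fast on the first missing privilege. Below is a minimal, self-contained C++ sketch of that fail-fast loop; Status and FakeAuthzSession are hypothetical stand-ins for the real mongo types, kept only so the sketch compiles on its own.

#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

// Hypothetical stand-in for mongo::Status, for illustration only.
struct Status {
    bool ok;
    std::string reason;
    static Status OK() { return {true, ""}; }
};

// Hypothetical stand-in for AuthorizationSession.
struct FakeAuthzSession {
    std::vector<std::string> grantable;  // roles this session may grant
    bool isAuthorizedToGrantRole(const std::string& role) const {
        return std::find(grantable.begin(), grantable.end(), role) != grantable.end();
    }
};

// Mirrors the fail-fast loop of auth::checkAuthorizedToGrantRoles: the first
// role the session may not grant turns the whole request into Unauthorized.
Status checkAuthorizedToGrantRoles(const FakeAuthzSession& session,
                                   const std::vector<std::string>& roles) {
    for (const std::string& role : roles) {
        if (!session.isAuthorizedToGrantRole(role)) {
            return {false, "Not authorized to grant role: " + role};
        }
    }
    return Status::OK();
}

int main() {
    FakeAuthzSession session{{"readWrite"}};
    Status s = checkAuthorizedToGrantRoles(session, {"readWrite", "dbAdmin"});
    std::cout << (s.ok ? "authorized" : s.reason) << std::endl;  // prints the error
    return 0;
}
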
diff --git a/src/mongo/db/commands/user_management_commands_common.cpp b/src/mongo/db/commands/user_management_commands_common.cpp
index 9e2210a92d5..64aee9eca51 100644
--- a/src/mongo/db/commands/user_management_commands_common.cpp
+++ b/src/mongo/db/commands/user_management_commands_common.cpp
@@ -52,525 +52,476 @@
namespace mongo {
namespace auth {
- void redactPasswordData(mutablebson::Element parent) {
- namespace mmb = mutablebson;
- const StringData pwdFieldName("pwd", StringData::LiteralTag());
- for (mmb::Element pwdElement = mmb::findFirstChildNamed(parent, pwdFieldName);
- pwdElement.ok();
- pwdElement = mmb::findElementNamed(pwdElement.rightSibling(), pwdFieldName)) {
-
- pwdElement.setValueString("xxx");
- }
- }
-
- Status checkAuthorizedToGrantRoles(AuthorizationSession* authzSession,
- const std::vector<RoleName>& roles) {
- for (size_t i = 0; i < roles.size(); ++i) {
- if (!authzSession->isAuthorizedToGrantRole(roles[i])) {
- return Status(ErrorCodes::Unauthorized,
- str::stream() << "Not authorized to grant role: "
- << roles[i].getFullName());
- }
- }
-
- return Status::OK();
+void redactPasswordData(mutablebson::Element parent) {
+ namespace mmb = mutablebson;
+ const StringData pwdFieldName("pwd", StringData::LiteralTag());
+ for (mmb::Element pwdElement = mmb::findFirstChildNamed(parent, pwdFieldName); pwdElement.ok();
+ pwdElement = mmb::findElementNamed(pwdElement.rightSibling(), pwdFieldName)) {
+ pwdElement.setValueString("xxx");
}
+}
- Status checkAuthorizedToGrantPrivileges(AuthorizationSession* authzSession,
- const PrivilegeVector& privileges) {
- for (PrivilegeVector::const_iterator it = privileges.begin();
- it != privileges.end(); ++it) {
- Status status = authzSession->checkAuthorizedToGrantPrivilege(*it);
- if (!status.isOK()) {
- return status;
- }
- }
-
- return Status::OK();
- }
-
- Status checkAuthorizedToRevokeRoles(AuthorizationSession* authzSession,
- const std::vector<RoleName>& roles) {
- for (size_t i = 0; i < roles.size(); ++i) {
- if (!authzSession->isAuthorizedToRevokeRole(roles[i])) {
- return Status(ErrorCodes::Unauthorized,
- str::stream() << "Not authorized to revoke role: " <<
- roles[i].getFullName());
- }
+Status checkAuthorizedToGrantRoles(AuthorizationSession* authzSession,
+ const std::vector<RoleName>& roles) {
+ for (size_t i = 0; i < roles.size(); ++i) {
+ if (!authzSession->isAuthorizedToGrantRole(roles[i])) {
+ return Status(ErrorCodes::Unauthorized,
+ str::stream()
+ << "Not authorized to grant role: " << roles[i].getFullName());
}
- return Status::OK();
}
- Status checkAuthorizedToRevokePrivileges(AuthorizationSession* authzSession,
- const PrivilegeVector& privileges) {
- for (PrivilegeVector::const_iterator it = privileges.begin();
- it != privileges.end(); ++it) {
- Status status = authzSession->checkAuthorizedToRevokePrivilege(*it);
- if (!status.isOK()) {
- return status;
- }
- }
-
- return Status::OK();
- }
+ return Status::OK();
+}
- Status checkAuthForCreateUserCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- AuthorizationSession* authzSession = AuthorizationSession::get(client);
- auth::CreateOrUpdateUserArgs args;
- Status status = auth::parseCreateOrUpdateUserCommands(cmdObj,
- "createUser",
- dbname,
- &args);
+Status checkAuthorizedToGrantPrivileges(AuthorizationSession* authzSession,
+ const PrivilegeVector& privileges) {
+ for (PrivilegeVector::const_iterator it = privileges.begin(); it != privileges.end(); ++it) {
+ Status status = authzSession->checkAuthorizedToGrantPrivilege(*it);
if (!status.isOK()) {
return status;
}
+ }
- if (!authzSession->isAuthorizedForActionsOnResource(
- ResourcePattern::forDatabaseName(args.userName.getDB()),
- ActionType::createUser)) {
+ return Status::OK();
+}
+
+Status checkAuthorizedToRevokeRoles(AuthorizationSession* authzSession,
+ const std::vector<RoleName>& roles) {
+ for (size_t i = 0; i < roles.size(); ++i) {
+ if (!authzSession->isAuthorizedToRevokeRole(roles[i])) {
return Status(ErrorCodes::Unauthorized,
- str::stream() << "Not authorized to create users on db: "
- << args.userName.getDB());
+ str::stream()
+ << "Not authorized to revoke role: " << roles[i].getFullName());
}
-
- return checkAuthorizedToGrantRoles(authzSession, args.roles);
}
+ return Status::OK();
+}
- Status checkAuthForUpdateUserCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- AuthorizationSession* authzSession = AuthorizationSession::get(client);
- auth::CreateOrUpdateUserArgs args;
- Status status = auth::parseCreateOrUpdateUserCommands(cmdObj,
- "updateUser",
- dbname,
- &args);
+Status checkAuthorizedToRevokePrivileges(AuthorizationSession* authzSession,
+ const PrivilegeVector& privileges) {
+ for (PrivilegeVector::const_iterator it = privileges.begin(); it != privileges.end(); ++it) {
+ Status status = authzSession->checkAuthorizedToRevokePrivilege(*it);
if (!status.isOK()) {
return status;
}
+ }
- if (args.hasHashedPassword) {
- if (!authzSession->isAuthorizedToChangeOwnPasswordAsUser(args.userName) &&
- !authzSession->isAuthorizedForActionsOnResource(
- ResourcePattern::forDatabaseName(args.userName.getDB()),
- ActionType::changePassword)) {
- return Status(ErrorCodes::Unauthorized,
- str::stream() << "Not authorized to change password of user: "
- << args.userName.getFullName());
- }
- }
-
- if (args.hasCustomData) {
- if (!authzSession->isAuthorizedToChangeOwnCustomDataAsUser(args.userName) &&
- !authzSession->isAuthorizedForActionsOnResource(
- ResourcePattern::forDatabaseName(args.userName.getDB()),
- ActionType::changeCustomData)) {
- return Status(ErrorCodes::Unauthorized,
- str::stream() << "Not authorized to change customData of user: "
- << args.userName.getFullName());
- }
- }
+ return Status::OK();
+}
+
+Status checkAuthForCreateUserCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ AuthorizationSession* authzSession = AuthorizationSession::get(client);
+ auth::CreateOrUpdateUserArgs args;
+ Status status = auth::parseCreateOrUpdateUserCommands(cmdObj, "createUser", dbname, &args);
+ if (!status.isOK()) {
+ return status;
+ }
- if (args.hasRoles) {
- // You don't know what roles you might be revoking, so require the ability to
- // revoke any role in the system.
- if (!authzSession->isAuthorizedForActionsOnResource(
- ResourcePattern::forAnyNormalResource(), ActionType::revokeRole)) {
- return Status(ErrorCodes::Unauthorized,
- "In order to use updateUser to set roles array, must be "
- "authorized to revoke any role in the system");
- }
+ if (!authzSession->isAuthorizedForActionsOnResource(
+ ResourcePattern::forDatabaseName(args.userName.getDB()), ActionType::createUser)) {
+ return Status(ErrorCodes::Unauthorized,
+ str::stream()
+ << "Not authorized to create users on db: " << args.userName.getDB());
+ }
- return checkAuthorizedToGrantRoles(authzSession, args.roles);
- }
+ return checkAuthorizedToGrantRoles(authzSession, args.roles);
+}
+
+Status checkAuthForUpdateUserCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ AuthorizationSession* authzSession = AuthorizationSession::get(client);
+ auth::CreateOrUpdateUserArgs args;
+ Status status = auth::parseCreateOrUpdateUserCommands(cmdObj, "updateUser", dbname, &args);
+ if (!status.isOK()) {
+ return status;
+ }
- return Status::OK();
- }
-
- Status checkAuthForGrantRolesToUserCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- AuthorizationSession* authzSession = AuthorizationSession::get(client);
- std::vector<RoleName> roles;
- std::string unusedUserNameString;
- BSONObj unusedWriteConcern;
- Status status = auth::parseRolePossessionManipulationCommands(cmdObj,
- "grantRolesToUser",
- dbname,
- &unusedUserNameString,
- &roles,
- &unusedWriteConcern);
- if (!status.isOK()) {
- return status;
+ if (args.hasHashedPassword) {
+ if (!authzSession->isAuthorizedToChangeOwnPasswordAsUser(args.userName) &&
+ !authzSession->isAuthorizedForActionsOnResource(
+ ResourcePattern::forDatabaseName(args.userName.getDB()),
+ ActionType::changePassword)) {
+ return Status(ErrorCodes::Unauthorized,
+ str::stream() << "Not authorized to change password of user: "
+ << args.userName.getFullName());
}
-
- return checkAuthorizedToGrantRoles(authzSession, roles);
}
- Status checkAuthForCreateRoleCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- AuthorizationSession* authzSession = AuthorizationSession::get(client);
- auth::CreateOrUpdateRoleArgs args;
- Status status = auth::parseCreateOrUpdateRoleCommands(cmdObj,
- "createRole",
- dbname,
- &args);
- if (!status.isOK()) {
- return status;
+ if (args.hasCustomData) {
+ if (!authzSession->isAuthorizedToChangeOwnCustomDataAsUser(args.userName) &&
+ !authzSession->isAuthorizedForActionsOnResource(
+ ResourcePattern::forDatabaseName(args.userName.getDB()),
+ ActionType::changeCustomData)) {
+ return Status(ErrorCodes::Unauthorized,
+ str::stream() << "Not authorized to change customData of user: "
+ << args.userName.getFullName());
}
+ }
- if (!authzSession->isAuthorizedForActionsOnResource(
- ResourcePattern::forDatabaseName(args.roleName.getDB()),
- ActionType::createRole)) {
+ if (args.hasRoles) {
+ // You don't know what roles you might be revoking, so require the ability to
+ // revoke any role in the system.
+ if (!authzSession->isAuthorizedForActionsOnResource(ResourcePattern::forAnyNormalResource(),
+ ActionType::revokeRole)) {
return Status(ErrorCodes::Unauthorized,
- str::stream() << "Not authorized to create roles on db: "
- << args.roleName.getDB());
+                          "In order to use updateUser to set the roles array, must be "
+                          "authorized to revoke any role in the system");
}
- status = checkAuthorizedToGrantRoles(authzSession, args.roles);
- if (!status.isOK()) {
- return status;
- }
+ return checkAuthorizedToGrantRoles(authzSession, args.roles);
+ }
- return checkAuthorizedToGrantPrivileges(authzSession, args.privileges);
+ return Status::OK();
+}
+
+Status checkAuthForGrantRolesToUserCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ AuthorizationSession* authzSession = AuthorizationSession::get(client);
+ std::vector<RoleName> roles;
+ std::string unusedUserNameString;
+ BSONObj unusedWriteConcern;
+ Status status = auth::parseRolePossessionManipulationCommands(
+ cmdObj, "grantRolesToUser", dbname, &unusedUserNameString, &roles, &unusedWriteConcern);
+ if (!status.isOK()) {
+ return status;
}
- Status checkAuthForUpdateRoleCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- AuthorizationSession* authzSession = AuthorizationSession::get(client);
- auth::CreateOrUpdateRoleArgs args;
- Status status = auth::parseCreateOrUpdateRoleCommands(cmdObj,
- "updateRole",
- dbname,
- &args);
- if (!status.isOK()) {
- return status;
- }
+ return checkAuthorizedToGrantRoles(authzSession, roles);
+}
+
+Status checkAuthForCreateRoleCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ AuthorizationSession* authzSession = AuthorizationSession::get(client);
+ auth::CreateOrUpdateRoleArgs args;
+ Status status = auth::parseCreateOrUpdateRoleCommands(cmdObj, "createRole", dbname, &args);
+ if (!status.isOK()) {
+ return status;
+ }
- // You don't know what roles or privileges you might be revoking, so require the ability
- // to revoke any role (or privilege) in the system.
- if (!authzSession->isAuthorizedForActionsOnResource(
- ResourcePattern::forAnyNormalResource(), ActionType::revokeRole)) {
- return Status(ErrorCodes::Unauthorized,
- "updateRole command required the ability to revoke any role in the "
- "system");
- }
+ if (!authzSession->isAuthorizedForActionsOnResource(
+ ResourcePattern::forDatabaseName(args.roleName.getDB()), ActionType::createRole)) {
+ return Status(ErrorCodes::Unauthorized,
+ str::stream()
+ << "Not authorized to create roles on db: " << args.roleName.getDB());
+ }
- status = checkAuthorizedToGrantRoles(authzSession, args.roles);
- if (!status.isOK()) {
- return status;
- }
+ status = checkAuthorizedToGrantRoles(authzSession, args.roles);
+ if (!status.isOK()) {
+ return status;
+ }
- return checkAuthorizedToGrantPrivileges(authzSession, args.privileges);
- }
-
- Status checkAuthForGrantRolesToRoleCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- AuthorizationSession* authzSession = AuthorizationSession::get(client);
- std::vector<RoleName> roles;
- std::string unusedUserNameString;
- BSONObj unusedWriteConcern;
- Status status = auth::parseRolePossessionManipulationCommands(cmdObj,
- "grantRolesToRole",
- dbname,
- &unusedUserNameString,
- &roles,
- &unusedWriteConcern);
- if (!status.isOK()) {
- return status;
- }
+ return checkAuthorizedToGrantPrivileges(authzSession, args.privileges);
+}
+
+Status checkAuthForUpdateRoleCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ AuthorizationSession* authzSession = AuthorizationSession::get(client);
+ auth::CreateOrUpdateRoleArgs args;
+ Status status = auth::parseCreateOrUpdateRoleCommands(cmdObj, "updateRole", dbname, &args);
+ if (!status.isOK()) {
+ return status;
+ }
- return checkAuthorizedToGrantRoles(authzSession, roles);
- }
-
- Status checkAuthForGrantPrivilegesToRoleCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- AuthorizationSession* authzSession = AuthorizationSession::get(client);
- PrivilegeVector privileges;
- RoleName unusedRoleName;
- BSONObj unusedWriteConcern;
- Status status =
- auth::parseAndValidateRolePrivilegeManipulationCommands(cmdObj,
- "grantPrivilegesToRole",
- dbname,
- &unusedRoleName,
- &privileges,
- &unusedWriteConcern);
- if (!status.isOK()) {
- return status;
- }
+ // You don't know what roles or privileges you might be revoking, so require the ability
+ // to revoke any role (or privilege) in the system.
+ if (!authzSession->isAuthorizedForActionsOnResource(ResourcePattern::forAnyNormalResource(),
+ ActionType::revokeRole)) {
+ return Status(ErrorCodes::Unauthorized,
+                      "updateRole command requires the ability to revoke any role in the "
+                      "system");
+ }
- return checkAuthorizedToGrantPrivileges(authzSession, privileges);
+ status = checkAuthorizedToGrantRoles(authzSession, args.roles);
+ if (!status.isOK()) {
+ return status;
}
- Status checkAuthForDropUserCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- AuthorizationSession* authzSession = AuthorizationSession::get(client);
- UserName userName;
- BSONObj unusedWriteConcern;
- Status status = auth::parseAndValidateDropUserCommand(cmdObj,
- dbname,
- &userName,
- &unusedWriteConcern);
- if (!status.isOK()) {
- return status;
- }
+ return checkAuthorizedToGrantPrivileges(authzSession, args.privileges);
+}
+
+Status checkAuthForGrantRolesToRoleCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ AuthorizationSession* authzSession = AuthorizationSession::get(client);
+ std::vector<RoleName> roles;
+ std::string unusedUserNameString;
+ BSONObj unusedWriteConcern;
+ Status status = auth::parseRolePossessionManipulationCommands(
+ cmdObj, "grantRolesToRole", dbname, &unusedUserNameString, &roles, &unusedWriteConcern);
+ if (!status.isOK()) {
+ return status;
+ }
- if (!authzSession->isAuthorizedForActionsOnResource(
- ResourcePattern::forDatabaseName(userName.getDB()), ActionType::dropUser)) {
- return Status(ErrorCodes::Unauthorized,
- str::stream() << "Not authorized to drop users from the "
- << userName.getDB() << " database");
- }
- return Status::OK();
- }
-
- Status checkAuthForDropRoleCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- AuthorizationSession* authzSession = AuthorizationSession::get(client);
- RoleName roleName;
- BSONObj unusedWriteConcern;
- Status status = auth::parseDropRoleCommand(cmdObj,
- dbname,
- &roleName,
- &unusedWriteConcern);
- if (!status.isOK()) {
- return status;
- }
+ return checkAuthorizedToGrantRoles(authzSession, roles);
+}
+
+Status checkAuthForGrantPrivilegesToRoleCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ AuthorizationSession* authzSession = AuthorizationSession::get(client);
+ PrivilegeVector privileges;
+ RoleName unusedRoleName;
+ BSONObj unusedWriteConcern;
+ Status status = auth::parseAndValidateRolePrivilegeManipulationCommands(
+ cmdObj, "grantPrivilegesToRole", dbname, &unusedRoleName, &privileges, &unusedWriteConcern);
+ if (!status.isOK()) {
+ return status;
+ }
- if (!authzSession->isAuthorizedForActionsOnResource(
- ResourcePattern::forDatabaseName(roleName.getDB()), ActionType::dropRole)) {
- return Status(ErrorCodes::Unauthorized,
- str::stream() << "Not authorized to drop roles from the "
- << roleName.getDB() << " database");
- }
- return Status::OK();
+ return checkAuthorizedToGrantPrivileges(authzSession, privileges);
+}
+
+Status checkAuthForDropUserCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ AuthorizationSession* authzSession = AuthorizationSession::get(client);
+ UserName userName;
+ BSONObj unusedWriteConcern;
+ Status status =
+ auth::parseAndValidateDropUserCommand(cmdObj, dbname, &userName, &unusedWriteConcern);
+ if (!status.isOK()) {
+ return status;
}
- Status checkAuthForDropAllUsersFromDatabaseCommand(ClientBasic* client,
- const std::string& dbname) {
- AuthorizationSession* authzSession = AuthorizationSession::get(client);
- if (!authzSession->isAuthorizedForActionsOnResource(
- ResourcePattern::forDatabaseName(dbname), ActionType::dropUser)) {
- return Status(ErrorCodes::Unauthorized,
- str::stream() << "Not authorized to drop users from the "
- << dbname << " database");
- }
- return Status::OK();
- }
-
- Status checkAuthForRevokeRolesFromUserCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- AuthorizationSession* authzSession = AuthorizationSession::get(client);
- std::vector<RoleName> roles;
- std::string unusedUserNameString;
- BSONObj unusedWriteConcern;
- Status status = auth::parseRolePossessionManipulationCommands(cmdObj,
- "revokeRolesFromUser",
- dbname,
- &unusedUserNameString,
- &roles,
- &unusedWriteConcern);
- if (!status.isOK()) {
- return status;
- }
+ if (!authzSession->isAuthorizedForActionsOnResource(
+ ResourcePattern::forDatabaseName(userName.getDB()), ActionType::dropUser)) {
+ return Status(ErrorCodes::Unauthorized,
+ str::stream() << "Not authorized to drop users from the " << userName.getDB()
+ << " database");
+ }
+ return Status::OK();
+}
+
+Status checkAuthForDropRoleCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ AuthorizationSession* authzSession = AuthorizationSession::get(client);
+ RoleName roleName;
+ BSONObj unusedWriteConcern;
+ Status status = auth::parseDropRoleCommand(cmdObj, dbname, &roleName, &unusedWriteConcern);
+ if (!status.isOK()) {
+ return status;
+ }
- return checkAuthorizedToRevokeRoles(authzSession, roles);
- }
-
- Status checkAuthForRevokeRolesFromRoleCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- AuthorizationSession* authzSession = AuthorizationSession::get(client);
- std::vector<RoleName> roles;
- std::string unusedUserNameString;
- BSONObj unusedWriteConcern;
- Status status = auth::parseRolePossessionManipulationCommands(cmdObj,
- "revokeRolesFromRole",
- dbname,
- &unusedUserNameString,
- &roles,
- &unusedWriteConcern);
- if (!status.isOK()) {
- return status;
- }
+ if (!authzSession->isAuthorizedForActionsOnResource(
+ ResourcePattern::forDatabaseName(roleName.getDB()), ActionType::dropRole)) {
+ return Status(ErrorCodes::Unauthorized,
+ str::stream() << "Not authorized to drop roles from the " << roleName.getDB()
+ << " database");
+ }
+ return Status::OK();
+}
+
+Status checkAuthForDropAllUsersFromDatabaseCommand(ClientBasic* client, const std::string& dbname) {
+ AuthorizationSession* authzSession = AuthorizationSession::get(client);
+ if (!authzSession->isAuthorizedForActionsOnResource(ResourcePattern::forDatabaseName(dbname),
+ ActionType::dropUser)) {
+ return Status(ErrorCodes::Unauthorized,
+ str::stream() << "Not authorized to drop users from the " << dbname
+ << " database");
+ }
+ return Status::OK();
+}
+
+Status checkAuthForRevokeRolesFromUserCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ AuthorizationSession* authzSession = AuthorizationSession::get(client);
+ std::vector<RoleName> roles;
+ std::string unusedUserNameString;
+ BSONObj unusedWriteConcern;
+ Status status = auth::parseRolePossessionManipulationCommands(
+ cmdObj, "revokeRolesFromUser", dbname, &unusedUserNameString, &roles, &unusedWriteConcern);
+ if (!status.isOK()) {
+ return status;
+ }
- return checkAuthorizedToRevokeRoles(authzSession, roles);
+ return checkAuthorizedToRevokeRoles(authzSession, roles);
+}
+
+Status checkAuthForRevokeRolesFromRoleCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ AuthorizationSession* authzSession = AuthorizationSession::get(client);
+ std::vector<RoleName> roles;
+ std::string unusedUserNameString;
+ BSONObj unusedWriteConcern;
+ Status status = auth::parseRolePossessionManipulationCommands(
+ cmdObj, "revokeRolesFromRole", dbname, &unusedUserNameString, &roles, &unusedWriteConcern);
+ if (!status.isOK()) {
+ return status;
}
- Status checkAuthForUsersInfoCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- AuthorizationSession* authzSession = AuthorizationSession::get(client);
- auth::UsersInfoArgs args;
- Status status = auth::parseUsersInfoCommand(cmdObj, dbname, &args);
- if (!status.isOK()) {
- return status;
- }
+ return checkAuthorizedToRevokeRoles(authzSession, roles);
+}
+
+Status checkAuthForUsersInfoCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ AuthorizationSession* authzSession = AuthorizationSession::get(client);
+ auth::UsersInfoArgs args;
+ Status status = auth::parseUsersInfoCommand(cmdObj, dbname, &args);
+ if (!status.isOK()) {
+ return status;
+ }
- if (args.allForDB) {
+ if (args.allForDB) {
+ if (!authzSession->isAuthorizedForActionsOnResource(
+ ResourcePattern::forDatabaseName(dbname), ActionType::viewUser)) {
+ return Status(ErrorCodes::Unauthorized,
+ str::stream() << "Not authorized to view users from the " << dbname
+ << " database");
+ }
+ } else {
+ for (size_t i = 0; i < args.userNames.size(); ++i) {
+ if (authzSession->lookupUser(args.userNames[i])) {
+ continue; // Can always view users you are logged in as
+ }
if (!authzSession->isAuthorizedForActionsOnResource(
- ResourcePattern::forDatabaseName(dbname), ActionType::viewUser)) {
+ ResourcePattern::forDatabaseName(args.userNames[i].getDB()),
+ ActionType::viewUser)) {
return Status(ErrorCodes::Unauthorized,
- str::stream() << "Not authorized to view users from the "
- << dbname << " database");
- }
- } else {
- for (size_t i = 0; i < args.userNames.size(); ++i) {
- if (authzSession->lookupUser(args.userNames[i])) {
- continue; // Can always view users you are logged in as
- }
- if (!authzSession->isAuthorizedForActionsOnResource(
- ResourcePattern::forDatabaseName(args.userNames[i].getDB()),
- ActionType::viewUser)) {
- return Status(ErrorCodes::Unauthorized,
- str::stream() << "Not authorized to view users from the "
- << dbname << " database");
- }
+                              str::stream() << "Not authorized to view users from the "
+                                            << args.userNames[i].getDB() << " database");
}
}
- return Status::OK();
- }
-
- Status checkAuthForRevokePrivilegesFromRoleCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- AuthorizationSession* authzSession = AuthorizationSession::get(client);
- PrivilegeVector privileges;
- RoleName unusedRoleName;
- BSONObj unusedWriteConcern;
- Status status =
- auth::parseAndValidateRolePrivilegeManipulationCommands(cmdObj,
- "revokePrivilegesFromRole",
- dbname,
- &unusedRoleName,
- &privileges,
- &unusedWriteConcern);
- if (!status.isOK()) {
- return status;
- }
+ }
+ return Status::OK();
+}
+
+Status checkAuthForRevokePrivilegesFromRoleCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ AuthorizationSession* authzSession = AuthorizationSession::get(client);
+ PrivilegeVector privileges;
+ RoleName unusedRoleName;
+ BSONObj unusedWriteConcern;
+ Status status =
+ auth::parseAndValidateRolePrivilegeManipulationCommands(cmdObj,
+ "revokePrivilegesFromRole",
+ dbname,
+ &unusedRoleName,
+ &privileges,
+ &unusedWriteConcern);
+ if (!status.isOK()) {
+ return status;
+ }
- return checkAuthorizedToRevokePrivileges(authzSession, privileges);
+ return checkAuthorizedToRevokePrivileges(authzSession, privileges);
+}
+
+Status checkAuthForDropAllRolesFromDatabaseCommand(ClientBasic* client, const std::string& dbname) {
+ AuthorizationSession* authzSession = AuthorizationSession::get(client);
+ if (!authzSession->isAuthorizedForActionsOnResource(ResourcePattern::forDatabaseName(dbname),
+ ActionType::dropRole)) {
+ return Status(ErrorCodes::Unauthorized,
+ str::stream() << "Not authorized to drop roles from the " << dbname
+ << " database");
+ }
+ return Status::OK();
+}
+
+Status checkAuthForRolesInfoCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ AuthorizationSession* authzSession = AuthorizationSession::get(client);
+ auth::RolesInfoArgs args;
+ Status status = auth::parseRolesInfoCommand(cmdObj, dbname, &args);
+ if (!status.isOK()) {
+ return status;
}
- Status checkAuthForDropAllRolesFromDatabaseCommand(ClientBasic* client,
- const std::string& dbname) {
- AuthorizationSession* authzSession = AuthorizationSession::get(client);
+ if (args.allForDB) {
if (!authzSession->isAuthorizedForActionsOnResource(
- ResourcePattern::forDatabaseName(dbname), ActionType::dropRole)) {
+ ResourcePattern::forDatabaseName(dbname), ActionType::viewRole)) {
return Status(ErrorCodes::Unauthorized,
- str::stream() << "Not authorized to drop roles from the "
- << dbname << " database");
- }
- return Status::OK();
- }
-
- Status checkAuthForRolesInfoCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- AuthorizationSession* authzSession = AuthorizationSession::get(client);
- auth::RolesInfoArgs args;
- Status status = auth::parseRolesInfoCommand(cmdObj, dbname, &args);
- if (!status.isOK()) {
- return status;
+ str::stream() << "Not authorized to view roles from the " << dbname
+ << " database");
}
+ } else {
+ for (size_t i = 0; i < args.roleNames.size(); ++i) {
+ if (authzSession->isAuthenticatedAsUserWithRole(args.roleNames[i])) {
+ continue; // Can always see roles that you are a member of
+ }
- if (args.allForDB) {
if (!authzSession->isAuthorizedForActionsOnResource(
- ResourcePattern::forDatabaseName(dbname), ActionType::viewRole)) {
+ ResourcePattern::forDatabaseName(args.roleNames[i].getDB()),
+ ActionType::viewRole)) {
return Status(ErrorCodes::Unauthorized,
str::stream() << "Not authorized to view roles from the "
- << dbname << " database");
- }
- } else {
- for (size_t i = 0; i < args.roleNames.size(); ++i) {
- if (authzSession->isAuthenticatedAsUserWithRole(args.roleNames[i])) {
- continue; // Can always see roles that you are a member of
- }
-
- if (!authzSession->isAuthorizedForActionsOnResource(
- ResourcePattern::forDatabaseName(args.roleNames[i].getDB()),
- ActionType::viewRole)) {
- return Status(ErrorCodes::Unauthorized,
- str::stream() << "Not authorized to view roles from the "
- << args.roleNames[i].getDB() << " database");
- }
+ << args.roleNames[i].getDB() << " database");
}
}
-
- return Status::OK();
}
- Status checkAuthForInvalidateUserCacheCommand(ClientBasic* client) {
- AuthorizationSession* authzSession = AuthorizationSession::get(client);
- if (!authzSession->isAuthorizedForActionsOnResource(
- ResourcePattern::forClusterResource(), ActionType::invalidateUserCache)) {
- return Status(ErrorCodes::Unauthorized, "Not authorized to invalidate user cache");
- }
- return Status::OK();
- }
+ return Status::OK();
+}
- Status checkAuthForGetUserCacheGenerationCommand(ClientBasic* client) {
- AuthorizationSession* authzSession = AuthorizationSession::get(client);
- if (!authzSession->isAuthorizedForActionsOnResource(
- ResourcePattern::forClusterResource(), ActionType::internal)) {
- return Status(ErrorCodes::Unauthorized, "Not authorized to get cache generation");
- }
- return Status::OK();
+Status checkAuthForInvalidateUserCacheCommand(ClientBasic* client) {
+ AuthorizationSession* authzSession = AuthorizationSession::get(client);
+ if (!authzSession->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(),
+ ActionType::invalidateUserCache)) {
+ return Status(ErrorCodes::Unauthorized, "Not authorized to invalidate user cache");
+ }
+ return Status::OK();
+}
+
+Status checkAuthForGetUserCacheGenerationCommand(ClientBasic* client) {
+ AuthorizationSession* authzSession = AuthorizationSession::get(client);
+ if (!authzSession->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(),
+ ActionType::internal)) {
+ return Status(ErrorCodes::Unauthorized, "Not authorized to get cache generation");
+ }
+ return Status::OK();
+}
+
+Status checkAuthForMergeAuthzCollectionsCommand(ClientBasic* client, const BSONObj& cmdObj) {
+ auth::MergeAuthzCollectionsArgs args;
+ Status status = auth::parseMergeAuthzCollectionsCommand(cmdObj, &args);
+ if (!status.isOK()) {
+ return status;
}
- Status checkAuthForMergeAuthzCollectionsCommand(ClientBasic* client,
- const BSONObj& cmdObj) {
- auth::MergeAuthzCollectionsArgs args;
- Status status = auth::parseMergeAuthzCollectionsCommand(cmdObj, &args);
- if (!status.isOK()) {
- return status;
- }
-
- AuthorizationSession* authzSession = AuthorizationSession::get(client);
- ActionSet actions;
- actions.addAction(ActionType::createUser);
- actions.addAction(ActionType::createRole);
- actions.addAction(ActionType::grantRole);
- actions.addAction(ActionType::revokeRole);
- if (args.drop) {
- actions.addAction(ActionType::dropUser);
- actions.addAction(ActionType::dropRole);
- }
- if (!authzSession->isAuthorizedForActionsOnResource(
- ResourcePattern::forAnyNormalResource(), actions)) {
- return Status(ErrorCodes::Unauthorized,
- "Not authorized to update user/role data using _mergeAuthzCollections"
- " command");
- }
- if (!args.usersCollName.empty() &&
- !authzSession->isAuthorizedForActionsOnResource(
- ResourcePattern::forExactNamespace(NamespaceString(args.usersCollName)),
- ActionType::find)) {
- return Status(ErrorCodes::Unauthorized,
- str::stream() << "Not authorized to read "
- << args.usersCollName);
- }
- if (!args.rolesCollName.empty() &&
- !authzSession->isAuthorizedForActionsOnResource(
- ResourcePattern::forExactNamespace(NamespaceString(args.rolesCollName)),
- ActionType::find)) {
- return Status(ErrorCodes::Unauthorized,
- str::stream() << "Not authorized to read "
- << args.rolesCollName);
- }
- return Status::OK();
+ AuthorizationSession* authzSession = AuthorizationSession::get(client);
+ ActionSet actions;
+ actions.addAction(ActionType::createUser);
+ actions.addAction(ActionType::createRole);
+ actions.addAction(ActionType::grantRole);
+ actions.addAction(ActionType::revokeRole);
+ if (args.drop) {
+ actions.addAction(ActionType::dropUser);
+ actions.addAction(ActionType::dropRole);
+ }
+ if (!authzSession->isAuthorizedForActionsOnResource(ResourcePattern::forAnyNormalResource(),
+ actions)) {
+ return Status(ErrorCodes::Unauthorized,
+ "Not authorized to update user/role data using _mergeAuthzCollections"
+ " command");
+ }
+ if (!args.usersCollName.empty() &&
+ !authzSession->isAuthorizedForActionsOnResource(
+ ResourcePattern::forExactNamespace(NamespaceString(args.usersCollName)),
+ ActionType::find)) {
+ return Status(ErrorCodes::Unauthorized,
+ str::stream() << "Not authorized to read " << args.usersCollName);
+ }
+ if (!args.rolesCollName.empty() &&
+ !authzSession->isAuthorizedForActionsOnResource(
+ ResourcePattern::forExactNamespace(NamespaceString(args.rolesCollName)),
+ ActionType::find)) {
+ return Status(ErrorCodes::Unauthorized,
+ str::stream() << "Not authorized to read " << args.rolesCollName);
}
+ return Status::OK();
+}
-} // namespace auth
-} // namespace mongo
+} // namespace auth
+} // namespace mongo
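
Note: redactPasswordData above rewrites every element named "pwd" to the literal "xxx" before the command object can reach the logs. The sketch below shows the same redaction idea over a flat key/value document; Doc is a hypothetical stand-in for the mutablebson::Element tree the real code walks.

#include <iostream>
#include <string>
#include <utility>
#include <vector>

// Hypothetical flat stand-in for a command document; the real code operates
// on mutablebson::Element trees via findFirstChildNamed/findElementNamed.
using Doc = std::vector<std::pair<std::string, std::string>>;

// Replace the value of every field literally named "pwd", mirroring the
// find-first / find-next-sibling loop in redactPasswordData.
void redactPasswordData(Doc& doc) {
    for (auto& field : doc) {
        if (field.first == "pwd") {
            field.second = "xxx";
        }
    }
}

int main() {
    Doc cmd = {{"createUser", "alice"}, {"pwd", "s3cret"}, {"roles", "readWrite"}};
    redactPasswordData(cmd);
    for (const auto& f : cmd)
        std::cout << f.first << ": " << f.second << "\n";  // pwd: xxx
    return 0;
}
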
diff --git a/src/mongo/db/commands/validate.cpp b/src/mongo/db/commands/validate.cpp
index 5fc7a871de0..41d5b4afdb8 100644
--- a/src/mongo/db/commands/validate.cpp
+++ b/src/mongo/db/commands/validate.cpp
@@ -41,81 +41,87 @@
namespace mongo {
- using std::endl;
- using std::string;
- using std::stringstream;
+using std::endl;
+using std::string;
+using std::stringstream;
+
+class ValidateCmd : public Command {
+public:
+ ValidateCmd() : Command("validate") {}
+
+ virtual bool slaveOk() const {
+ return true;
+ }
+
+ virtual void help(stringstream& h) const {
+ h << "Validate contents of a namespace by scanning its data structures for correctness. "
+ "Slow.\n"
+             "Add the full:true option to do a more thorough check";
+ }
+
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::validate);
+ out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
+ }
+    // { validate: "collectionnamewithoutthedbpart" [, scandata: <bool>] [, full: <bool>] }
+
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ string ns = dbname + "." + cmdObj.firstElement().valuestrsafe();
+
+ NamespaceString ns_string(ns);
+ const bool full = cmdObj["full"].trueValue();
+ const bool scanData = full || cmdObj["scandata"].trueValue();
+
+ if (!ns_string.isNormal() && full) {
+ errmsg = "Can only run full validate on a regular collection";
+ return false;
+ }
- class ValidateCmd : public Command {
- public:
- ValidateCmd() : Command( "validate" ) {}
+ if (!serverGlobalParams.quiet) {
+ LOG(0) << "CMD: validate " << ns << endl;
+ }
- virtual bool slaveOk() const {
- return true;
+ AutoGetDb ctx(txn, ns_string.db(), MODE_IX);
+ Lock::CollectionLock collLk(txn->lockState(), ns_string.ns(), MODE_X);
+ Collection* collection = ctx.getDb() ? ctx.getDb()->getCollection(ns_string) : NULL;
+ if (!collection) {
+ errmsg = "ns not found";
+ return false;
}
- virtual void help(stringstream& h) const { h << "Validate contents of a namespace by scanning its data structures for correctness. Slow.\n"
- "Add full:true option to do a more thorough check"; }
+ result.append("ns", ns);
+
+ ValidateResults results;
+ Status status = collection->validate(txn, full, scanData, &results, &result);
+ if (!status.isOK())
+ return appendCommandStatus(result, status);
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::validate);
- out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
+ result.appendBool("valid", results.valid);
+ result.append("errors", results.errors);
+
+ if (!full) {
+ result.append(
+ "warning",
+            "Some checks omitted for speed. Use the {full:true} option to do a more thorough scan.");
}
- //{ validate: "collectionnamewithoutthedbpart" [, scandata: <bool>] [, full: <bool> } */
-
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- string ns = dbname + "." + cmdObj.firstElement().valuestrsafe();
-
- NamespaceString ns_string(ns);
- const bool full = cmdObj["full"].trueValue();
- const bool scanData = full || cmdObj["scandata"].trueValue();
-
- if ( !ns_string.isNormal() && full ) {
- errmsg = "Can only run full validate on a regular collection";
- return false;
- }
-
- if (!serverGlobalParams.quiet) {
- LOG(0) << "CMD: validate " << ns << endl;
- }
-
- AutoGetDb ctx(txn, ns_string.db(), MODE_IX);
- Lock::CollectionLock collLk(txn->lockState(), ns_string.ns(), MODE_X);
- Collection* collection = ctx.getDb() ? ctx.getDb()->getCollection(ns_string) : NULL;
- if ( !collection ) {
- errmsg = "ns not found";
- return false;
- }
-
- result.append( "ns", ns );
-
- ValidateResults results;
- Status status = collection->validate( txn, full, scanData, &results, &result );
- if ( !status.isOK() )
- return appendCommandStatus( result, status );
-
- result.appendBool("valid", results.valid);
- result.append("errors", results.errors);
-
- if ( !full ){
- result.append("warning", "Some checks omitted for speed. use {full:true} option to do more thorough scan.");
- }
-
- if ( !results.valid ) {
- result.append("advice", "ns corrupt. See http://dochub.mongodb.org/core/data-recovery");
- }
-
- return true;
+
+ if (!results.valid) {
+ result.append("advice", "ns corrupt. See http://dochub.mongodb.org/core/data-recovery");
}
- } validateCmd;
+ return true;
+ }
+} validateCmd;
}
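
Note: ValidateCmd::run derives its behavior from two booleans -- {full: true} implies a data scan, and a full validate is refused on non-normal namespaces. A small stand-alone sketch of that option resolution, with ValidateOptions as a hypothetical stand-in for the parsed BSON options:

#include <iostream>

// Hypothetical stand-in for the parsed command options; the real code reads
// these from a BSONObj with cmdObj["full"].trueValue() etc.
struct ValidateOptions {
    bool full = false;      // {full: true} requests the thorough check
    bool scandata = false;  // {scandata: true} requests a data scan
};

// Mirrors the resolution in ValidateCmd::run: full implies scandata.
bool resolveScanData(const ValidateOptions& opts) {
    return opts.full || opts.scandata;
}

// Mirrors the guard in ValidateCmd::run: a full validate is only allowed
// on a regular (normal) collection namespace.
bool canRunFullValidate(bool namespaceIsNormal, const ValidateOptions& opts) {
    return namespaceIsNormal || !opts.full;
}

int main() {
    ValidateOptions opts;
    opts.full = true;
    std::cout << std::boolalpha << resolveScanData(opts) << " "
              << canRunFullValidate(false, opts) << std::endl;  // true false
    return 0;
}
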
diff --git a/src/mongo/db/commands/write_commands/batch_executor.cpp b/src/mongo/db/commands/write_commands/batch_executor.cpp
index 0b277ddfa56..2087b5bd2f4 100644
--- a/src/mongo/db/commands/write_commands/batch_executor.cpp
+++ b/src/mongo/db/commands/write_commands/batch_executor.cpp
@@ -83,303 +83,290 @@
namespace mongo {
- using std::endl;
- using std::string;
- using std::unique_ptr;
- using std::vector;
-
- namespace {
-
- /**
- * Data structure to safely hold and clean up results of single write operations.
- */
- class WriteOpResult {
- MONGO_DISALLOW_COPYING(WriteOpResult);
- public:
- WriteOpResult() {}
-
- WriteOpStats& getStats() { return _stats; }
-
- WriteErrorDetail* getError() { return _error.get(); }
- WriteErrorDetail* releaseError() { return _error.release(); }
- void setError(WriteErrorDetail* error) { _error.reset(error); }
-
- private:
- WriteOpStats _stats;
- std::unique_ptr<WriteErrorDetail> _error;
- };
-
- } // namespace
-
- // TODO: Determine queueing behavior we want here
- MONGO_EXPORT_SERVER_PARAMETER( queueForMigrationCommit, bool, true );
-
- using mongoutils::str::stream;
-
- WriteBatchExecutor::WriteBatchExecutor( OperationContext* txn,
- OpCounters* opCounters,
- LastError* le ) :
- _txn(txn),
- _opCounters( opCounters ),
- _le( le ),
- _stats( new WriteBatchStats ) {
- }
+using std::endl;
+using std::string;
+using std::unique_ptr;
+using std::vector;
- static WCErrorDetail* toWriteConcernError( const Status& wcStatus,
- const WriteConcernResult& wcResult ) {
+namespace {
- WCErrorDetail* wcError = new WCErrorDetail;
+/**
+ * Data structure to safely hold and clean up results of single write operations.
+ */
+class WriteOpResult {
+ MONGO_DISALLOW_COPYING(WriteOpResult);
- wcError->setErrCode( wcStatus.code() );
- wcError->setErrMessage( wcStatus.reason() );
- if ( wcResult.wTimedOut )
- wcError->setErrInfo( BSON( "wtimeout" << true ) );
+public:
+ WriteOpResult() {}
- return wcError;
+ WriteOpStats& getStats() {
+ return _stats;
}
- static WriteErrorDetail* toWriteError( const Status& status ) {
-
- WriteErrorDetail* error = new WriteErrorDetail;
-
- // TODO: Complex transform here?
- error->setErrCode( status.code() );
- error->setErrMessage( status.reason() );
-
- return error;
+ WriteErrorDetail* getError() {
+ return _error.get();
}
-
- static void toBatchError( const Status& status, BatchedCommandResponse* response ) {
- response->clear();
- response->setErrCode( status.code() );
- response->setErrMessage( status.reason() );
- response->setOk( false );
- dassert( response->isValid(NULL) );
+ WriteErrorDetail* releaseError() {
+ return _error.release();
}
-
- static void noteInCriticalSection( WriteErrorDetail* staleError ) {
- BSONObjBuilder builder;
- if ( staleError->isErrInfoSet() )
- builder.appendElements( staleError->getErrInfo() );
- builder.append( "inCriticalSection", true );
- staleError->setErrInfo( builder.obj() );
+ void setError(WriteErrorDetail* error) {
+ _error.reset(error);
}
- // static
- Status WriteBatchExecutor::validateBatch( const BatchedCommandRequest& request ) {
-
- // Validate namespace
- const NamespaceString& nss = request.getNSS();
- if ( !nss.isValid() ) {
- return Status( ErrorCodes::InvalidNamespace,
- nss.ns() + " is not a valid namespace" );
- }
-
- // Make sure we can write to the namespace
- Status allowedStatus = userAllowedWriteNS( nss );
- if ( !allowedStatus.isOK() ) {
- return allowedStatus;
- }
-
- // Validate insert index requests
- // TODO: Push insert index requests through createIndex once all upgrade paths support it
- string errMsg;
- if ( request.isInsertIndexRequest() && !request.isValidIndexRequest( &errMsg ) ) {
- return Status( ErrorCodes::InvalidOptions, errMsg );
- }
-
- return Status::OK();
+private:
+ WriteOpStats _stats;
+ std::unique_ptr<WriteErrorDetail> _error;
+};
+
+} // namespace
+
+// TODO: Determine queueing behavior we want here
+MONGO_EXPORT_SERVER_PARAMETER(queueForMigrationCommit, bool, true);
+
+using mongoutils::str::stream;
+
+WriteBatchExecutor::WriteBatchExecutor(OperationContext* txn, OpCounters* opCounters, LastError* le)
+ : _txn(txn), _opCounters(opCounters), _le(le), _stats(new WriteBatchStats) {}
+
+static WCErrorDetail* toWriteConcernError(const Status& wcStatus,
+ const WriteConcernResult& wcResult) {
+ WCErrorDetail* wcError = new WCErrorDetail;
+
+ wcError->setErrCode(wcStatus.code());
+ wcError->setErrMessage(wcStatus.reason());
+ if (wcResult.wTimedOut)
+ wcError->setErrInfo(BSON("wtimeout" << true));
+
+ return wcError;
+}
+
+static WriteErrorDetail* toWriteError(const Status& status) {
+ WriteErrorDetail* error = new WriteErrorDetail;
+
+ // TODO: Complex transform here?
+ error->setErrCode(status.code());
+ error->setErrMessage(status.reason());
+
+ return error;
+}
+
+static void toBatchError(const Status& status, BatchedCommandResponse* response) {
+ response->clear();
+ response->setErrCode(status.code());
+ response->setErrMessage(status.reason());
+ response->setOk(false);
+ dassert(response->isValid(NULL));
+}
+
+static void noteInCriticalSection(WriteErrorDetail* staleError) {
+ BSONObjBuilder builder;
+ if (staleError->isErrInfoSet())
+ builder.appendElements(staleError->getErrInfo());
+ builder.append("inCriticalSection", true);
+ staleError->setErrInfo(builder.obj());
+}
+
+// static
+Status WriteBatchExecutor::validateBatch(const BatchedCommandRequest& request) {
+ // Validate namespace
+ const NamespaceString& nss = request.getNSS();
+ if (!nss.isValid()) {
+ return Status(ErrorCodes::InvalidNamespace, nss.ns() + " is not a valid namespace");
}
- void WriteBatchExecutor::executeBatch( const BatchedCommandRequest& request,
- BatchedCommandResponse* response ) {
+ // Make sure we can write to the namespace
+ Status allowedStatus = userAllowedWriteNS(nss);
+ if (!allowedStatus.isOK()) {
+ return allowedStatus;
+ }
- // Validate namespace
- Status isValid = validateBatch(request);
- if (!isValid.isOK()) {
- toBatchError( isValid, response );
- return;
- }
+ // Validate insert index requests
+ // TODO: Push insert index requests through createIndex once all upgrade paths support it
+ string errMsg;
+ if (request.isInsertIndexRequest() && !request.isValidIndexRequest(&errMsg)) {
+ return Status(ErrorCodes::InvalidOptions, errMsg);
+ }
- if ( request.sizeWriteOps() == 0u ) {
- toBatchError( Status( ErrorCodes::InvalidLength,
- "no write ops were included in the batch" ),
- response );
- return;
- }
+ return Status::OK();
+}
- // Validate batch size
- if ( request.sizeWriteOps() > BatchedCommandRequest::kMaxWriteBatchSize ) {
- toBatchError( Status( ErrorCodes::InvalidLength,
- stream() << "exceeded maximum write batch size of "
- << BatchedCommandRequest::kMaxWriteBatchSize ),
- response );
- return;
- }
+void WriteBatchExecutor::executeBatch(const BatchedCommandRequest& request,
+ BatchedCommandResponse* response) {
+ // Validate namespace
+ Status isValid = validateBatch(request);
+ if (!isValid.isOK()) {
+ toBatchError(isValid, response);
+ return;
+ }
- //
- // End validation
- //
+ if (request.sizeWriteOps() == 0u) {
+ toBatchError(Status(ErrorCodes::InvalidLength, "no write ops were included in the batch"),
+ response);
+ return;
+ }
- const WriteConcernOptions& writeConcern = _txn->getWriteConcern();
- bool silentWC = writeConcern.wMode.empty() && writeConcern.wNumNodes == 0
- && writeConcern.syncMode == WriteConcernOptions::NONE;
+ // Validate batch size
+ if (request.sizeWriteOps() > BatchedCommandRequest::kMaxWriteBatchSize) {
+ toBatchError(Status(ErrorCodes::InvalidLength,
+ stream() << "exceeded maximum write batch size of "
+ << BatchedCommandRequest::kMaxWriteBatchSize),
+ response);
+ return;
+ }
- Timer commandTimer;
+ //
+ // End validation
+ //
- OwnedPointerVector<WriteErrorDetail> writeErrorsOwned;
- vector<WriteErrorDetail*>& writeErrors = writeErrorsOwned.mutableVector();
+ const WriteConcernOptions& writeConcern = _txn->getWriteConcern();
+ bool silentWC = writeConcern.wMode.empty() && writeConcern.wNumNodes == 0 &&
+ writeConcern.syncMode == WriteConcernOptions::NONE;
- OwnedPointerVector<BatchedUpsertDetail> upsertedOwned;
- vector<BatchedUpsertDetail*>& upserted = upsertedOwned.mutableVector();
+ Timer commandTimer;
- //
- // Apply each batch item, possibly bulking some items together in the write lock.
- // Stops on error if batch is ordered.
- //
+ OwnedPointerVector<WriteErrorDetail> writeErrorsOwned;
+ vector<WriteErrorDetail*>& writeErrors = writeErrorsOwned.mutableVector();
- bulkExecute( request, &upserted, &writeErrors );
+ OwnedPointerVector<BatchedUpsertDetail> upsertedOwned;
+ vector<BatchedUpsertDetail*>& upserted = upsertedOwned.mutableVector();
- //
- // Try to enforce the write concern if everything succeeded (unordered or ordered)
- // OR if something succeeded and we're unordered.
- //
+ //
+ // Apply each batch item, possibly bulking some items together in the write lock.
+ // Stops on error if batch is ordered.
+ //
- unique_ptr<WCErrorDetail> wcError;
- bool needToEnforceWC = writeErrors.empty()
- || ( !request.getOrdered()
- && writeErrors.size() < request.sizeWriteOps() );
+ bulkExecute(request, &upserted, &writeErrors);
- if ( needToEnforceWC ) {
- {
- stdx::lock_guard<Client> lk(*_txn->getClient());
- CurOp::get(_txn)->setMessage_inlock( "waiting for write concern" );
- }
+ //
+ // Try to enforce the write concern if everything succeeded (unordered or ordered)
+ // OR if something succeeded and we're unordered.
+ //
- WriteConcernResult res;
- Status status = waitForWriteConcern(
- _txn,
- repl::ReplClientInfo::forClient(_txn->getClient()).getLastOp(),
- &res);
+ unique_ptr<WCErrorDetail> wcError;
+ bool needToEnforceWC = writeErrors.empty() ||
+ (!request.getOrdered() && writeErrors.size() < request.sizeWriteOps());
- if ( !status.isOK() ) {
- wcError.reset( toWriteConcernError( status, res ) );
- }
+ if (needToEnforceWC) {
+ {
+ stdx::lock_guard<Client> lk(*_txn->getClient());
+ CurOp::get(_txn)->setMessage_inlock("waiting for write concern");
}
- //
- // Refresh metadata if needed
- //
+ WriteConcernResult res;
+ Status status = waitForWriteConcern(
+ _txn, repl::ReplClientInfo::forClient(_txn->getClient()).getLastOp(), &res);
- bool staleBatch = !writeErrors.empty()
- && writeErrors.back()->getErrCode() == ErrorCodes::StaleShardVersion;
-
- if ( staleBatch ) {
+ if (!status.isOK()) {
+ wcError.reset(toWriteConcernError(status, res));
+ }
+ }
- const BatchedRequestMetadata* requestMetadata = request.getMetadata();
- dassert( requestMetadata );
+ //
+ // Refresh metadata if needed
+ //
- // Make sure our shard name is set or is the same as what was set previously
- if ( shardingState.setShardName( requestMetadata->getShardName() ) ) {
+ bool staleBatch =
+ !writeErrors.empty() && writeErrors.back()->getErrCode() == ErrorCodes::StaleShardVersion;
+
+ if (staleBatch) {
+ const BatchedRequestMetadata* requestMetadata = request.getMetadata();
+ dassert(requestMetadata);
+
+ // Make sure our shard name is set or is the same as what was set previously
+ if (shardingState.setShardName(requestMetadata->getShardName())) {
+ //
+ // First, we refresh metadata if we need to based on the requested version.
+ //
+
+ ChunkVersion latestShardVersion;
+ shardingState.refreshMetadataIfNeeded(_txn,
+ request.getTargetingNS(),
+ requestMetadata->getShardVersion(),
+ &latestShardVersion);
+
+ // Report if we're still changing our metadata
+ // TODO: Better reporting per-collection
+ if (shardingState.inCriticalMigrateSection()) {
+ noteInCriticalSection(writeErrors.back());
+ }
+ if (queueForMigrationCommit) {
//
- // First, we refresh metadata if we need to based on the requested version.
+ // Queue up for migration to end - this allows us to be sure that clients will
+ // not repeatedly try to refresh metadata that is not yet written to the config
+ // server. Not necessary for correctness.
+                // Exposed as an optional parameter to allow testing of queuing behavior with
+ // different network timings.
//
- ChunkVersion latestShardVersion;
- shardingState.refreshMetadataIfNeeded( _txn,
- request.getTargetingNS(),
- requestMetadata->getShardVersion(),
- &latestShardVersion );
-
- // Report if we're still changing our metadata
- // TODO: Better reporting per-collection
- if ( shardingState.inCriticalMigrateSection() ) {
- noteInCriticalSection( writeErrors.back() );
- }
-
- if ( queueForMigrationCommit ) {
-
- //
- // Queue up for migration to end - this allows us to be sure that clients will
- // not repeatedly try to refresh metadata that is not yet written to the config
- // server. Not necessary for correctness.
- // Exposed as optional parameter to allow testing of queuing behavior with
- // different network timings.
- //
-
- const ChunkVersion& requestShardVersion = requestMetadata->getShardVersion();
+ const ChunkVersion& requestShardVersion = requestMetadata->getShardVersion();
- //
- // Only wait if we're an older version (in the current collection epoch) and
- // we're not write compatible, implying that the current migration is affecting
- // writes.
- //
-
- if ( requestShardVersion.isOlderThan( latestShardVersion ) &&
- !requestShardVersion.isWriteCompatibleWith( latestShardVersion ) ) {
-
- while ( shardingState.inCriticalMigrateSection() ) {
+ //
+ // Only wait if we're an older version (in the current collection epoch) and
+ // we're not write compatible, implying that the current migration is affecting
+ // writes.
+ //
- log() << "write request to old shard version "
- << requestMetadata->getShardVersion().toString()
- << " waiting for migration commit" << endl;
+ if (requestShardVersion.isOlderThan(latestShardVersion) &&
+ !requestShardVersion.isWriteCompatibleWith(latestShardVersion)) {
+ while (shardingState.inCriticalMigrateSection()) {
+ log() << "write request to old shard version "
+ << requestMetadata->getShardVersion().toString()
+ << " waiting for migration commit" << endl;
- shardingState.waitTillNotInCriticalSection( 10 /* secs */);
- }
+ shardingState.waitTillNotInCriticalSection(10 /* secs */);
}
}
}
- else {
- // If our shard name is stale, our version must have been stale as well
- dassert( writeErrors.size() == request.sizeWriteOps() );
- }
+ } else {
+ // If our shard name is stale, our version must have been stale as well
+ dassert(writeErrors.size() == request.sizeWriteOps());
}
+ }
- //
- // Construct response
- //
-
- response->setOk( true );
+ //
+ // Construct response
+ //
- if ( !silentWC ) {
+ response->setOk(true);
- if ( upserted.size() ) {
- response->setUpsertDetails( upserted );
- }
+ if (!silentWC) {
+ if (upserted.size()) {
+ response->setUpsertDetails(upserted);
+ }
- if ( writeErrors.size() ) {
- response->setErrDetails( writeErrors );
- }
+ if (writeErrors.size()) {
+ response->setErrDetails(writeErrors);
+ }
- if ( wcError.get() ) {
- response->setWriteConcernError( wcError.release() );
- }
+ if (wcError.get()) {
+ response->setWriteConcernError(wcError.release());
+ }
- repl::ReplicationCoordinator* replCoord = repl::getGlobalReplicationCoordinator();
- const repl::ReplicationCoordinator::Mode replMode = replCoord->getReplicationMode();
- if (replMode != repl::ReplicationCoordinator::modeNone) {
- response->setLastOp(repl::ReplClientInfo::forClient(_txn->getClient()).getLastOp()
- .getTimestamp());
- if (replMode == repl::ReplicationCoordinator::modeReplSet) {
- response->setElectionId(replCoord->getElectionId());
- }
+ repl::ReplicationCoordinator* replCoord = repl::getGlobalReplicationCoordinator();
+ const repl::ReplicationCoordinator::Mode replMode = replCoord->getReplicationMode();
+ if (replMode != repl::ReplicationCoordinator::modeNone) {
+ response->setLastOp(
+ repl::ReplClientInfo::forClient(_txn->getClient()).getLastOp().getTimestamp());
+ if (replMode == repl::ReplicationCoordinator::modeReplSet) {
+ response->setElectionId(replCoord->getElectionId());
}
-
- // Set the stats for the response
- response->setN( _stats->numInserted + _stats->numUpserted + _stats->numMatched
- + _stats->numDeleted );
- if ( request.getBatchType() == BatchedCommandRequest::BatchType_Update )
- response->setNModified( _stats->numModified );
}
- dassert( response->isValid( NULL ) );
+ // Set the stats for the response
+ response->setN(_stats->numInserted + _stats->numUpserted + _stats->numMatched +
+ _stats->numDeleted);
+ if (request.getBatchType() == BatchedCommandRequest::BatchType_Update)
+ response->setNModified(_stats->numModified);
}
- // Translates write item type to wire protocol op code.
- // Helper for WriteBatchExecutor::applyWriteItem().
- static int getOpCode(const BatchItemRef& currWrite) {
- switch (currWrite.getRequest()->getBatchType()) {
+ dassert(response->isValid(NULL));
+}
+
+// Translates write item type to wire protocol op code.
+// Helper for WriteBatchExecutor::applyWriteItem().
+static int getOpCode(const BatchItemRef& currWrite) {
+ switch (currWrite.getRequest()->getBatchType()) {
case BatchedCommandRequest::BatchType_Insert:
return dbInsert;
case BatchedCommandRequest::BatchType_Update:
@@ -388,1068 +375,990 @@ namespace mongo {
return dbDelete;
default:
MONGO_UNREACHABLE;
+ }
+}
+
+static void buildStaleError(const ChunkVersion& shardVersionRecvd,
+ const ChunkVersion& shardVersionWanted,
+ WriteErrorDetail* error) {
+ // Write stale error to results
+ error->setErrCode(ErrorCodes::StaleShardVersion);
+
+ BSONObjBuilder infoB;
+ shardVersionWanted.addToBSON(infoB, "vWanted");
+ error->setErrInfo(infoB.obj());
+
+ string errMsg = stream() << "stale shard version detected before write, received "
+ << shardVersionRecvd.toString() << " but local version is "
+ << shardVersionWanted.toString();
+ error->setErrMessage(errMsg);
+}
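
The message assembled here is what a client ultimately sees in a writeErrors entry, next to the vWanted version in errInfo. A standalone sketch of the same string construction, with std::ostringstream standing in for mongoutils' stream() helper and made-up placeholder version strings (both are assumptions for illustration, not the server's exact rendering):

    #include <iostream>
    #include <sstream>
    #include <string>

    // Builds the same "stale shard version" message as buildStaleError(),
    // with plain strings standing in for ChunkVersion::toString() output.
    std::string buildStaleErrMsg(const std::string& recvd, const std::string& wanted) {
        std::ostringstream ss;
        ss << "stale shard version detected before write, received " << recvd
           << " but local version is " << wanted;
        return ss.str();
    }

    int main() {
        // ChunkVersion renders roughly as major|minor||epoch.
        std::cout << buildStaleErrMsg("2|0||epoch", "3|0||epoch") << "\n";
        return 0;
    }
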
+
+static bool checkShardVersion(OperationContext* txn,
+ ShardingState* shardingState,
+ const BatchedCommandRequest& request,
+ WriteOpResult* result) {
+ const NamespaceString& nss = request.getTargetingNSS();
+ dassert(txn->lockState()->isCollectionLockedForMode(nss.ns(), MODE_IX));
+
+ ChunkVersion requestShardVersion =
+ request.isMetadataSet() && request.getMetadata()->isShardVersionSet()
+ ? request.getMetadata()->getShardVersion()
+ : ChunkVersion::IGNORED();
+
+ if (shardingState->enabled()) {
+ CollectionMetadataPtr metadata = shardingState->getCollectionMetadata(nss.ns());
+
+ if (!ChunkVersion::isIgnoredVersion(requestShardVersion)) {
+ ChunkVersion shardVersion =
+ metadata ? metadata->getShardVersion() : ChunkVersion::UNSHARDED();
+
+ if (!requestShardVersion.isWriteCompatibleWith(shardVersion)) {
+ result->setError(new WriteErrorDetail);
+ buildStaleError(requestShardVersion, shardVersion, result->getError());
+ return false;
+ }
}
}
- static void buildStaleError( const ChunkVersion& shardVersionRecvd,
- const ChunkVersion& shardVersionWanted,
- WriteErrorDetail* error ) {
-
- // Write stale error to results
- error->setErrCode( ErrorCodes::StaleShardVersion );
+ return true;
+}
- BSONObjBuilder infoB;
- shardVersionWanted.addToBSON( infoB, "vWanted" );
- error->setErrInfo( infoB.obj() );
-
- string errMsg = stream() << "stale shard version detected before write, received "
- << shardVersionRecvd.toString() << " but local version is "
- << shardVersionWanted.toString();
- error->setErrMessage( errMsg );
+static bool checkIsMasterForDatabase(const NamespaceString& ns, WriteOpResult* result) {
+ if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(ns)) {
+ WriteErrorDetail* errorDetail = new WriteErrorDetail;
+ result->setError(errorDetail);
+ errorDetail->setErrCode(ErrorCodes::NotMaster);
+ errorDetail->setErrMessage("Not primary while writing to " + ns.toString());
+ return false;
}
-
- static bool checkShardVersion(OperationContext* txn,
+ return true;
+}
+
+static void buildUniqueIndexError(const BSONObj& keyPattern,
+ const BSONObj& indexPattern,
+ WriteErrorDetail* error) {
+ error->setErrCode(ErrorCodes::CannotCreateIndex);
+ string errMsg = stream() << "cannot create unique index over " << indexPattern
+ << " with shard key pattern " << keyPattern;
+ error->setErrMessage(errMsg);
+}
+
+static bool checkIndexConstraints(OperationContext* txn,
ShardingState* shardingState,
const BatchedCommandRequest& request,
WriteOpResult* result) {
+ const NamespaceString& nss = request.getTargetingNSS();
+ dassert(txn->lockState()->isCollectionLockedForMode(nss.ns(), MODE_IX));
- const NamespaceString& nss = request.getTargetingNSS();
- dassert(txn->lockState()->isCollectionLockedForMode(nss.ns(), MODE_IX));
-
- ChunkVersion requestShardVersion =
- request.isMetadataSet() && request.getMetadata()->isShardVersionSet() ?
- request.getMetadata()->getShardVersion() : ChunkVersion::IGNORED();
-
- if ( shardingState->enabled() ) {
-
- CollectionMetadataPtr metadata = shardingState->getCollectionMetadata( nss.ns() );
+ if (!request.isUniqueIndexRequest())
+ return true;
- if ( !ChunkVersion::isIgnoredVersion( requestShardVersion ) ) {
+ if (shardingState->enabled()) {
+ CollectionMetadataPtr metadata = shardingState->getCollectionMetadata(nss.ns());
- ChunkVersion shardVersion =
- metadata ? metadata->getShardVersion() : ChunkVersion::UNSHARDED();
+ if (metadata) {
+ ShardKeyPattern shardKeyPattern(metadata->getKeyPattern());
+ if (!shardKeyPattern.isUniqueIndexCompatible(request.getIndexKeyPattern())) {
+ result->setError(new WriteErrorDetail);
+ buildUniqueIndexError(
+ metadata->getKeyPattern(), request.getIndexKeyPattern(), result->getError());
- if ( !requestShardVersion.isWriteCompatibleWith( shardVersion ) ) {
- result->setError(new WriteErrorDetail);
- buildStaleError(requestShardVersion, shardVersion, result->getError());
- return false;
- }
+ return false;
}
}
-
- return true;
- }
-
- static bool checkIsMasterForDatabase(const NamespaceString& ns, WriteOpResult* result) {
- if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(ns)) {
- WriteErrorDetail* errorDetail = new WriteErrorDetail;
- result->setError(errorDetail);
- errorDetail->setErrCode(ErrorCodes::NotMaster);
- errorDetail->setErrMessage("Not primary while writing to " + ns.toString());
- return false;
- }
- return true;
}
- static void buildUniqueIndexError( const BSONObj& keyPattern,
- const BSONObj& indexPattern,
- WriteErrorDetail* error ) {
- error->setErrCode( ErrorCodes::CannotCreateIndex );
- string errMsg = stream() << "cannot create unique index over " << indexPattern
- << " with shard key pattern " << keyPattern;
- error->setErrMessage( errMsg );
+ return true;
+}
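
The rule behind isUniqueIndexCompatible is that a per-shard unique index only guarantees global uniqueness when documents with equal index keys cannot land on different shards, i.e. when the shard key fields form a prefix of the index key pattern (the _id index being exempt). A rough standalone approximation of that prefix check over field-name lists, not the server's exact semantics:

    #include <cassert>
    #include <string>
    #include <vector>

    // Approximation: a unique index is enforceable on a sharded collection
    // only if the shard key fields form a prefix of the index key pattern.
    bool isUniqueIndexCompatibleSketch(const std::vector<std::string>& shardKeyFields,
                                       const std::vector<std::string>& indexKeyFields) {
        if (indexKeyFields == std::vector<std::string>{"_id"})
            return true;  // the _id index is always permitted
        if (indexKeyFields.size() < shardKeyFields.size())
            return false;
        for (size_t i = 0; i < shardKeyFields.size(); ++i) {
            if (indexKeyFields[i] != shardKeyFields[i])
                return false;
        }
        return true;
    }

    int main() {
        assert(isUniqueIndexCompatibleSketch({"a"}, {"a", "b"}));  // allowed
        assert(!isUniqueIndexCompatibleSketch({"a"}, {"b"}));      // CannotCreateIndex path
        return 0;
    }
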
+
+//
+// HELPERS FOR CUROP MANAGEMENT AND GLOBAL STATS
+//
+
+static void beginCurrentOp(OperationContext* txn, const BatchItemRef& currWrite) {
+ stdx::lock_guard<Client> lk(*txn->getClient());
+ CurOp* const currentOp = CurOp::get(txn);
+ currentOp->setOp_inlock(getOpCode(currWrite));
+ currentOp->ensureStarted();
+ currentOp->setNS_inlock(currWrite.getRequest()->getNS());
+
+ currentOp->debug().ns = currentOp->getNS();
+ currentOp->debug().op = currentOp->getOp();
+
+ if (currWrite.getOpType() == BatchedCommandRequest::BatchType_Insert) {
+ currentOp->setQuery_inlock(currWrite.getDocument());
+ currentOp->debug().query = currWrite.getDocument();
+ currentOp->debug().ninserted = 0;
+ } else if (currWrite.getOpType() == BatchedCommandRequest::BatchType_Update) {
+ currentOp->setQuery_inlock(currWrite.getUpdate()->getQuery());
+ currentOp->debug().query = currWrite.getUpdate()->getQuery();
+ currentOp->debug().updateobj = currWrite.getUpdate()->getUpdateExpr();
+ // Note: debug().nMatched, nModified and nmoved are set internally in update
+ } else {
+ dassert(currWrite.getOpType() == BatchedCommandRequest::BatchType_Delete);
+ currentOp->setQuery_inlock(currWrite.getDelete()->getQuery());
+ currentOp->debug().query = currWrite.getDelete()->getQuery();
+ currentOp->debug().ndeleted = 0;
}
-
- static bool checkIndexConstraints(OperationContext* txn,
- ShardingState* shardingState,
- const BatchedCommandRequest& request,
- WriteOpResult* result) {
-
- const NamespaceString& nss = request.getTargetingNSS();
- dassert(txn->lockState()->isCollectionLockedForMode(nss.ns(), MODE_IX));
-
- if ( !request.isUniqueIndexRequest() )
- return true;
-
- if ( shardingState->enabled() ) {
-
- CollectionMetadataPtr metadata = shardingState->getCollectionMetadata( nss.ns() );
-
- if ( metadata ) {
- ShardKeyPattern shardKeyPattern(metadata->getKeyPattern());
- if (!shardKeyPattern.isUniqueIndexCompatible(request.getIndexKeyPattern())) {
-
- result->setError(new WriteErrorDetail);
- buildUniqueIndexError(metadata->getKeyPattern(),
- request.getIndexKeyPattern(),
- result->getError());
-
- return false;
- }
- }
- }
-
- return true;
+}
+
+void WriteBatchExecutor::incOpStats(const BatchItemRef& currWrite) {
+ if (currWrite.getOpType() == BatchedCommandRequest::BatchType_Insert) {
+ _opCounters->gotInsert();
+ } else if (currWrite.getOpType() == BatchedCommandRequest::BatchType_Update) {
+ _opCounters->gotUpdate();
+ } else {
+ dassert(currWrite.getOpType() == BatchedCommandRequest::BatchType_Delete);
+ _opCounters->gotDelete();
}
-
- //
- // HELPERS FOR CUROP MANAGEMENT AND GLOBAL STATS
- //
-
- static void beginCurrentOp(OperationContext* txn, const BatchItemRef& currWrite) {
-
- stdx::lock_guard<Client> lk(*txn->getClient());
- CurOp* const currentOp = CurOp::get(txn);
- currentOp->setOp_inlock(getOpCode(currWrite));
- currentOp->ensureStarted();
- currentOp->setNS_inlock( currWrite.getRequest()->getNS() );
-
- currentOp->debug().ns = currentOp->getNS();
- currentOp->debug().op = currentOp->getOp();
-
- if ( currWrite.getOpType() == BatchedCommandRequest::BatchType_Insert ) {
- currentOp->setQuery_inlock( currWrite.getDocument() );
- currentOp->debug().query = currWrite.getDocument();
- currentOp->debug().ninserted = 0;
+}
+
+void WriteBatchExecutor::incWriteStats(const BatchItemRef& currWrite,
+ const WriteOpStats& stats,
+ const WriteErrorDetail* error,
+ CurOp* currentOp) {
+ if (currWrite.getOpType() == BatchedCommandRequest::BatchType_Insert) {
+ _stats->numInserted += stats.n;
+ currentOp->debug().ninserted += stats.n;
+ if (!error) {
+ _le->recordInsert(stats.n);
}
- else if ( currWrite.getOpType() == BatchedCommandRequest::BatchType_Update ) {
- currentOp->setQuery_inlock( currWrite.getUpdate()->getQuery() );
- currentOp->debug().query = currWrite.getUpdate()->getQuery();
- currentOp->debug().updateobj = currWrite.getUpdate()->getUpdateExpr();
- // Note: debug().nMatched, nModified and nmoved are set internally in update
+ } else if (currWrite.getOpType() == BatchedCommandRequest::BatchType_Update) {
+ if (stats.upsertedID.isEmpty()) {
+ _stats->numMatched += stats.n;
+ _stats->numModified += stats.nModified;
+ } else {
+ ++_stats->numUpserted;
}
- else {
- dassert( currWrite.getOpType() == BatchedCommandRequest::BatchType_Delete );
- currentOp->setQuery_inlock( currWrite.getDelete()->getQuery() );
- currentOp->debug().query = currWrite.getDelete()->getQuery();
- currentOp->debug().ndeleted = 0;
- }
-
- }
- void WriteBatchExecutor::incOpStats( const BatchItemRef& currWrite ) {
-
- if ( currWrite.getOpType() == BatchedCommandRequest::BatchType_Insert ) {
- _opCounters->gotInsert();
- }
- else if ( currWrite.getOpType() == BatchedCommandRequest::BatchType_Update ) {
- _opCounters->gotUpdate();
+ if (!error) {
+ _le->recordUpdate(stats.upsertedID.isEmpty() && stats.n > 0, stats.n, stats.upsertedID);
}
- else {
- dassert( currWrite.getOpType() == BatchedCommandRequest::BatchType_Delete );
- _opCounters->gotDelete();
+ } else {
+ dassert(currWrite.getOpType() == BatchedCommandRequest::BatchType_Delete);
+ _stats->numDeleted += stats.n;
+ if (!error) {
+ _le->recordDelete(stats.n);
}
+ currentOp->debug().ndeleted += stats.n;
}
- void WriteBatchExecutor::incWriteStats( const BatchItemRef& currWrite,
- const WriteOpStats& stats,
- const WriteErrorDetail* error,
- CurOp* currentOp ) {
-
- if ( currWrite.getOpType() == BatchedCommandRequest::BatchType_Insert ) {
- _stats->numInserted += stats.n;
- currentOp->debug().ninserted += stats.n;
- if (!error) {
- _le->recordInsert(stats.n);
- }
- }
- else if ( currWrite.getOpType() == BatchedCommandRequest::BatchType_Update ) {
- if ( stats.upsertedID.isEmpty() ) {
- _stats->numMatched += stats.n;
- _stats->numModified += stats.nModified;
- }
- else {
- ++_stats->numUpserted;
- }
-
- if (!error) {
- _le->recordUpdate( stats.upsertedID.isEmpty() && stats.n > 0,
- stats.n,
- stats.upsertedID );
- }
- }
- else {
- dassert( currWrite.getOpType() == BatchedCommandRequest::BatchType_Delete );
- _stats->numDeleted += stats.n;
- if ( !error ) {
- _le->recordDelete( stats.n );
- }
- currentOp->debug().ndeleted += stats.n;
- }
-
- if (error) {
- _le->setLastError(error->getErrCode(), error->getErrMessage().c_str());
- }
+ if (error) {
+ _le->setLastError(error->getErrCode(), error->getErrMessage().c_str());
}
-
- static void finishCurrentOp(OperationContext* txn, WriteErrorDetail* opError) {
-
- CurOp* currentOp = CurOp::get(txn);
- currentOp->done();
- int executionTime = currentOp->debug().executionTime = currentOp->totalTimeMillis();
- recordCurOpMetrics(txn);
- Top::get(txn->getClient()->getServiceContext()).record(
- currentOp->getNS(),
+}
+
+static void finishCurrentOp(OperationContext* txn, WriteErrorDetail* opError) {
+ CurOp* currentOp = CurOp::get(txn);
+ currentOp->done();
+ int executionTime = currentOp->debug().executionTime = currentOp->totalTimeMillis();
+ recordCurOpMetrics(txn);
+ Top::get(txn->getClient()->getServiceContext())
+ .record(currentOp->getNS(),
currentOp->getOp(),
- 1, // "write locked"
+ 1, // "write locked"
currentOp->totalTimeMicros(),
currentOp->isCommand());
- if ( opError ) {
- currentOp->debug().exceptionInfo = ExceptionInfo( opError->getErrMessage(),
- opError->getErrCode() );
+ if (opError) {
+ currentOp->debug().exceptionInfo =
+ ExceptionInfo(opError->getErrMessage(), opError->getErrCode());
- LOG(3) << " Caught Assertion in " << opToString( currentOp->getOp() )
- << ", continuing " << causedBy( opError->getErrMessage() ) << endl;
- }
+ LOG(3) << " Caught Assertion in " << opToString(currentOp->getOp()) << ", continuing "
+ << causedBy(opError->getErrMessage()) << endl;
+ }
- bool logAll = logger::globalLogDomain()->shouldLog(logger::LogComponent::kWrite,
- logger::LogSeverity::Debug(1));
- bool logSlow = executionTime
- > ( serverGlobalParams.slowMS + currentOp->getExpectedLatencyMs() );
+ bool logAll = logger::globalLogDomain()->shouldLog(logger::LogComponent::kWrite,
+ logger::LogSeverity::Debug(1));
+ bool logSlow = executionTime > (serverGlobalParams.slowMS + currentOp->getExpectedLatencyMs());
- if ( logAll || logSlow ) {
- Locker::LockerInfo lockerInfo;
- txn->lockState()->getLockerInfo(&lockerInfo);
+ if (logAll || logSlow) {
+ Locker::LockerInfo lockerInfo;
+ txn->lockState()->getLockerInfo(&lockerInfo);
- LOG(0) << currentOp->debug().report(*currentOp, lockerInfo.stats);
- }
+ LOG(0) << currentOp->debug().report(*currentOp, lockerInfo.stats);
+ }
- if (currentOp->shouldDBProfile(executionTime)) {
- profile(txn, CurOp::get(txn)->getOp());
- }
+ if (currentOp->shouldDBProfile(executionTime)) {
+ profile(txn, CurOp::get(txn)->getOp());
}
+}
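
The logAll/logSlow decision above is a plain threshold test: report the op when write-component debug logging is enabled, or when execution time exceeds slowMS plus the op's expected latency. Restated as a standalone sketch (the 100 ms default for slowMS matches the server's usual setting, but treat the numbers here as illustrative):

    #include <cassert>

    // Mirrors the logAll/logSlow decision in finishCurrentOp().
    bool shouldLogOp(bool debugLogEnabled,    // kWrite component at Debug(1)
                     int executionTimeMs,
                     int slowMS,              // server-wide threshold (default 100)
                     int expectedLatencyMs) { // per-op allowance
        const bool logAll = debugLogEnabled;
        const bool logSlow = executionTimeMs > (slowMS + expectedLatencyMs);
        return logAll || logSlow;
    }

    int main() {
        assert(shouldLogOp(false, 150, 100, 0));   // slow op gets reported
        assert(!shouldLogOp(false, 90, 100, 50));  // within budget
        assert(shouldLogOp(true, 1, 100, 0));      // debug logging forces it
        return 0;
    }
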
- // END HELPERS
+// END HELPERS
- //
- // CORE WRITE OPERATIONS (declaration)
- // These functions write to the database and return stats and zero or one of:
- // - page fault
- // - error
- //
+//
+// CORE WRITE OPERATIONS (declaration)
+// These functions write to the database and return stats and zero or one of:
+// - page fault
+// - error
+//
- static void singleInsert( OperationContext* txn,
- const BSONObj& docToInsert,
- Collection* collection,
- WriteOpResult* result );
+static void singleInsert(OperationContext* txn,
+ const BSONObj& docToInsert,
+ Collection* collection,
+ WriteOpResult* result);
- static void singleCreateIndex( OperationContext* txn,
- const BSONObj& indexDesc,
- WriteOpResult* result );
+static void singleCreateIndex(OperationContext* txn,
+ const BSONObj& indexDesc,
+ WriteOpResult* result);
- static void multiUpdate( OperationContext* txn,
- const BatchItemRef& updateItem,
- WriteOpResult* result );
+static void multiUpdate(OperationContext* txn,
+ const BatchItemRef& updateItem,
+ WriteOpResult* result);
- static void multiRemove( OperationContext* txn,
- const BatchItemRef& removeItem,
- WriteOpResult* result );
+static void multiRemove(OperationContext* txn,
+ const BatchItemRef& removeItem,
+ WriteOpResult* result);
- //
- // WRITE EXECUTION
- // In general, the exec* operations manage db lock state and stats before dispatching to the
- // core write operations, which are *only* responsible for performing a write and reporting
- // success or failure.
- //
+//
+// WRITE EXECUTION
+// In general, the exec* operations manage db lock state and stats before dispatching to the
+// core write operations, which are *only* responsible for performing a write and reporting
+// success or failure.
+//
+
+/**
+ * Representation of the execution state of execInserts. Used by a single
+ * execution of execInserts in a single thread.
+ */
+class WriteBatchExecutor::ExecInsertsState {
+ MONGO_DISALLOW_COPYING(ExecInsertsState);
+public:
/**
- * Representation of the execution state of execInserts. Used by a single
- * execution of execInserts in a single thread.
+ * Constructs a new instance, for performing inserts described in "aRequest".
*/
- class WriteBatchExecutor::ExecInsertsState {
- MONGO_DISALLOW_COPYING(ExecInsertsState);
- public:
- /**
- * Constructs a new instance, for performing inserts described in "aRequest".
- */
- explicit ExecInsertsState(OperationContext* txn,
- const BatchedCommandRequest* aRequest);
-
- /**
- * Acquires the write lock and client context needed to perform the current write operation.
- * Returns true on success, after which it is safe to use the "context" and "collection"
- * members. It is safe to call this function if this instance already holds the write lock.
- *
- * On failure, writeLock, context and collection will be NULL/clear.
- */
- bool lockAndCheck(WriteOpResult* result);
-
- /**
- * Releases the client context and write lock acquired by lockAndCheck. Safe to call
- * regardless of whether or not this state object currently owns the lock.
- */
- void unlock();
-
- /**
- * Returns true if this executor has the lock on the target database.
- */
- bool hasLock() { return _dbLock.get(); }
-
- /**
- * Gets the target collection for the batch operation. Value is undefined
- * unless hasLock() is true.
- */
- Collection* getCollection() { return _collection; }
-
- OperationContext* txn;
-
- // Request object describing the inserts.
- const BatchedCommandRequest* request;
-
- // Index of the current insert operation to perform.
- size_t currIndex = 0;
-
- // Translation of insert documents in "request" into insert-ready forms. This vector has a
- // correspondence with elements of the "request", and "currIndex" is used to
- // index both.
- std::vector<StatusWith<BSONObj> > normalizedInserts;
-
- private:
- bool _lockAndCheckImpl(WriteOpResult* result, bool intentLock);
-
- ScopedTransaction _transaction;
- // Guard object for the write lock on the target database.
- std::unique_ptr<Lock::DBLock> _dbLock;
- std::unique_ptr<Lock::CollectionLock> _collLock;
-
- Database* _database = nullptr;
- Collection* _collection = nullptr;
- };
-
- void WriteBatchExecutor::bulkExecute( const BatchedCommandRequest& request,
- std::vector<BatchedUpsertDetail*>* upsertedIds,
- std::vector<WriteErrorDetail*>* errors ) {
- boost::optional<DisableDocumentValidation> maybeDisableValidation;
- if (request.shouldBypassValidation()) {
- maybeDisableValidation.emplace(_txn);
- }
+ explicit ExecInsertsState(OperationContext* txn, const BatchedCommandRequest* aRequest);
- if ( request.getBatchType() == BatchedCommandRequest::BatchType_Insert ) {
- execInserts( request, errors );
- }
- else if ( request.getBatchType() == BatchedCommandRequest::BatchType_Update ) {
- for ( size_t i = 0; i < request.sizeWriteOps(); i++ ) {
+ /**
+ * Acquires the write lock and client context needed to perform the current write operation.
+ * Returns true on success, after which it is safe to use the "context" and "collection"
+ * members. It is safe to call this function if this instance already holds the write lock.
+ *
+ * On failure, writeLock, context and collection will be NULL/clear.
+ */
+ bool lockAndCheck(WriteOpResult* result);
- if ( i + 1 == request.sizeWriteOps() ) {
- setupSynchronousCommit( _txn );
- }
+ /**
+ * Releases the client context and write lock acquired by lockAndCheck. Safe to call
+ * regardless of whether or not this state object currently owns the lock.
+ */
+ void unlock();
- WriteErrorDetail* error = NULL;
- BSONObj upsertedId;
- execUpdate( BatchItemRef( &request, i ), &upsertedId, &error );
+ /**
+ * Returns true if this executor has the lock on the target database.
+ */
+ bool hasLock() {
+ return _dbLock.get();
+ }
- if ( !upsertedId.isEmpty() ) {
- BatchedUpsertDetail* batchUpsertedId = new BatchedUpsertDetail;
- batchUpsertedId->setIndex( i );
- batchUpsertedId->setUpsertedID( upsertedId );
- upsertedIds->push_back( batchUpsertedId );
- }
+ /**
+ * Gets the target collection for the batch operation. Value is undefined
+ * unless hasLock() is true.
+ */
+ Collection* getCollection() {
+ return _collection;
+ }
- if ( error ) {
- errors->push_back( error );
- if ( request.getOrdered() )
- break;
- }
- }
- }
- else {
- dassert( request.getBatchType() == BatchedCommandRequest::BatchType_Delete );
- for ( size_t i = 0; i < request.sizeWriteOps(); i++ ) {
+ OperationContext* txn;
- if ( i + 1 == request.sizeWriteOps() ) {
- setupSynchronousCommit( _txn );
- }
+ // Request object describing the inserts.
+ const BatchedCommandRequest* request;
- WriteErrorDetail* error = NULL;
- execRemove( BatchItemRef( &request, i ), &error );
+ // Index of the current insert operation to perform.
+ size_t currIndex = 0;
- if ( error ) {
- errors->push_back( error );
- if ( request.getOrdered() )
- break;
- }
- }
- }
-
-        // Fill in stale version errors for unordered batches (update/delete can't do this on their own)
- if ( !errors->empty() && !request.getOrdered() ) {
+ // Translation of insert documents in "request" into insert-ready forms. This vector has a
+ // correspondence with elements of the "request", and "currIndex" is used to
+ // index both.
+ std::vector<StatusWith<BSONObj>> normalizedInserts;
- const WriteErrorDetail* finalError = errors->back();
+private:
+ bool _lockAndCheckImpl(WriteOpResult* result, bool intentLock);
- if ( finalError->getErrCode() == ErrorCodes::StaleShardVersion ) {
- for ( size_t i = finalError->getIndex() + 1; i < request.sizeWriteOps(); i++ ) {
- WriteErrorDetail* dupStaleError = new WriteErrorDetail;
- finalError->cloneTo( dupStaleError );
- errors->push_back( dupStaleError );
- }
- }
- }
- }
+ ScopedTransaction _transaction;
+ // Guard object for the write lock on the target database.
+ std::unique_ptr<Lock::DBLock> _dbLock;
+ std::unique_ptr<Lock::CollectionLock> _collLock;
- // Goes over the request and preprocesses normalized versions of all the inserts in the request
- static void normalizeInserts( const BatchedCommandRequest& request,
- vector<StatusWith<BSONObj> >* normalizedInserts ) {
+ Database* _database = nullptr;
+ Collection* _collection = nullptr;
+};
- normalizedInserts->reserve(request.sizeWriteOps());
- for ( size_t i = 0; i < request.sizeWriteOps(); ++i ) {
- BSONObj insertDoc = request.getInsertRequest()->getDocumentsAt( i );
- StatusWith<BSONObj> normalInsert = fixDocumentForInsert( insertDoc );
- normalizedInserts->push_back( normalInsert );
- if ( request.getOrdered() && !normalInsert.isOK() )
- break;
- }
+void WriteBatchExecutor::bulkExecute(const BatchedCommandRequest& request,
+ std::vector<BatchedUpsertDetail*>* upsertedIds,
+ std::vector<WriteErrorDetail*>* errors) {
+ boost::optional<DisableDocumentValidation> maybeDisableValidation;
+ if (request.shouldBypassValidation()) {
+ maybeDisableValidation.emplace(_txn);
}
- void WriteBatchExecutor::execInserts( const BatchedCommandRequest& request,
- std::vector<WriteErrorDetail*>* errors ) {
-
- // Theory of operation:
- //
- // Instantiates an ExecInsertsState, which represents all of the state involved in the batch
- // insert execution algorithm. Most importantly, encapsulates the lock state.
- //
- // Every iteration of the loop in execInserts() processes one document insertion, by calling
- // insertOne() exactly once for a given value of state.currIndex.
- //
- // If the ExecInsertsState indicates that the requisite write locks are not held, insertOne
- // acquires them and performs lock-acquisition-time checks. However, on non-error
- // execution, it does not release the locks. Therefore, the yielding logic in the while
- // loop in execInserts() is solely responsible for lock release in the non-error case.
- //
- // Internally, insertOne loops performing the single insert until it completes without a
- // PageFaultException, or until it fails with some kind of error. Errors are mostly
- // propagated via the request->error field, but DBExceptions or std::exceptions may escape,
- // particularly on operation interruption. These kinds of errors necessarily prevent
- // further insertOne calls, and stop the batch. As a result, the only expected source of
- // such exceptions are interruptions.
- ExecInsertsState state(_txn, &request);
- normalizeInserts(request, &state.normalizedInserts);
-
- ShardedConnectionInfo* info = ShardedConnectionInfo::get(_txn->getClient(), false);
- if (info) {
- if (request.isMetadataSet() && request.getMetadata()->isShardVersionSet()) {
- info->setVersion(request.getTargetingNS(),
- request.getMetadata()->getShardVersion());
- }
- else {
- info->setVersion(request.getTargetingNS(), ChunkVersion::IGNORED());
- }
- }
-
- // Yield frequency is based on the same constants used by PlanYieldPolicy.
- ElapsedTracker elapsedTracker(internalQueryExecYieldIterations,
- internalQueryExecYieldPeriodMS);
-
- for (state.currIndex = 0;
- state.currIndex < state.request->sizeWriteOps();
- ++state.currIndex) {
-
- if (state.currIndex + 1 == state.request->sizeWriteOps()) {
+ if (request.getBatchType() == BatchedCommandRequest::BatchType_Insert) {
+ execInserts(request, errors);
+ } else if (request.getBatchType() == BatchedCommandRequest::BatchType_Update) {
+ for (size_t i = 0; i < request.sizeWriteOps(); i++) {
+ if (i + 1 == request.sizeWriteOps()) {
setupSynchronousCommit(_txn);
}
- if (elapsedTracker.intervalHasElapsed()) {
- // Yield between inserts.
- if (state.hasLock()) {
- // Release our locks. They get reacquired when insertOne() calls
- // ExecInsertsState::lockAndCheck(). Since the lock manager guarantees FIFO
- // queues waiting on locks, there is no need to explicitly sleep or give up
- // control of the processor here.
- state.unlock();
-
- // This releases any storage engine held locks/snapshots.
- _txn->recoveryUnit()->abandonSnapshot();
- }
+ WriteErrorDetail* error = NULL;
+ BSONObj upsertedId;
+ execUpdate(BatchItemRef(&request, i), &upsertedId, &error);
+
+ if (!upsertedId.isEmpty()) {
+ BatchedUpsertDetail* batchUpsertedId = new BatchedUpsertDetail;
+ batchUpsertedId->setIndex(i);
+ batchUpsertedId->setUpsertedID(upsertedId);
+ upsertedIds->push_back(batchUpsertedId);
+ }
- _txn->checkForInterrupt();
- elapsedTracker.resetLastTime();
+ if (error) {
+ errors->push_back(error);
+ if (request.getOrdered())
+ break;
+ }
+ }
+ } else {
+ dassert(request.getBatchType() == BatchedCommandRequest::BatchType_Delete);
+ for (size_t i = 0; i < request.sizeWriteOps(); i++) {
+ if (i + 1 == request.sizeWriteOps()) {
+ setupSynchronousCommit(_txn);
}
WriteErrorDetail* error = NULL;
- execOneInsert(&state, &error);
+ execRemove(BatchItemRef(&request, i), &error);
+
if (error) {
errors->push_back(error);
- error->setIndex(state.currIndex);
if (request.getOrdered())
- return;
+ break;
}
}
}
- void WriteBatchExecutor::execUpdate( const BatchItemRef& updateItem,
- BSONObj* upsertedId,
- WriteErrorDetail** error ) {
-
- // BEGIN CURRENT OP
- CurOp currentOp(_txn);
- beginCurrentOp(_txn, updateItem);
- incOpStats( updateItem );
-
- ShardedConnectionInfo* info = ShardedConnectionInfo::get(_txn->getClient(), false);
- if (info) {
- auto rootRequest = updateItem.getRequest();
- if (!updateItem.getUpdate()->getMulti() &&
- rootRequest->isMetadataSet() &&
- rootRequest->getMetadata()->isShardVersionSet()) {
- info->setVersion(rootRequest->getTargetingNS(),
- rootRequest->getMetadata()->getShardVersion());
- }
- else {
- info->setVersion(rootRequest->getTargetingNS(), ChunkVersion::IGNORED());
+    // Fill in stale version errors for unordered batches (update/delete can't do this on their own)
+ if (!errors->empty() && !request.getOrdered()) {
+ const WriteErrorDetail* finalError = errors->back();
+
+ if (finalError->getErrCode() == ErrorCodes::StaleShardVersion) {
+ for (size_t i = finalError->getIndex() + 1; i < request.sizeWriteOps(); i++) {
+ WriteErrorDetail* dupStaleError = new WriteErrorDetail;
+ finalError->cloneTo(dupStaleError);
+ errors->push_back(dupStaleError);
}
}
+ }
+}
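
Only the op that actually hit the stale shard version produces an error on the update/delete path, so for unordered batches the tail of the batch is back-filled with clones of that error, as the comment above notes. A reduced standalone sketch of the back-fill (WriteErrorDetail shrunk to a two-field struct, cloneTo replaced by copy construction, and the numeric code a stand-in for ErrorCodes::StaleShardVersion):

    #include <cassert>
    #include <vector>

    struct ErrDetail {  // reduced stand-in for WriteErrorDetail
        size_t index;
        int code;
    };

    const int kStaleShardVersion = 63;  // stand-in for ErrorCodes::StaleShardVersion

    // If the last recorded error is a stale-version error, every remaining
    // op in the batch would have failed identically, so clone it for them.
    void fillStaleErrors(std::vector<ErrDetail>& errors, size_t numWriteOps) {
        if (errors.empty() || errors.back().code != kStaleShardVersion)
            return;
        const ErrDetail last = errors.back();
        for (size_t i = last.index + 1; i < numWriteOps; ++i)
            errors.push_back(ErrDetail{i, last.code});
    }

    int main() {
        std::vector<ErrDetail> errors{{2, kStaleShardVersion}};
        fillStaleErrors(errors, 5);
        assert(errors.size() == 3);  // ops 3 and 4 were back-filled
        return 0;
    }
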
+
+// Goes over the request and preprocesses normalized versions of all the inserts in the request
+static void normalizeInserts(const BatchedCommandRequest& request,
+ vector<StatusWith<BSONObj>>* normalizedInserts) {
+ normalizedInserts->reserve(request.sizeWriteOps());
+ for (size_t i = 0; i < request.sizeWriteOps(); ++i) {
+ BSONObj insertDoc = request.getInsertRequest()->getDocumentsAt(i);
+ StatusWith<BSONObj> normalInsert = fixDocumentForInsert(insertDoc);
+ normalizedInserts->push_back(normalInsert);
+ if (request.getOrdered() && !normalInsert.isOK())
+ break;
+ }
+}
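
A convention worth spelling out: fixDocumentForInsert signals "no rewrite needed" by returning an empty object, which is why insertOne() below falls back to the original request document when the normalized value isEmpty(). The same optional-rewrite convention, sketched with std::string stand-ins for the documents:

    #include <cassert>
    #include <string>

    // Empty result means "document was already fine; use the original".
    std::string fixDocumentSketch(const std::string& doc) {
        if (doc.find("_id") != std::string::npos)
            return "";                     // no rewrite needed
        return "{_id: ...} + " + doc;      // rewritten form, e.g. _id prepended
    }

    int main() {
        const std::string doc = "{a: 1}";
        const std::string normalized = fixDocumentSketch(doc);
        const std::string& toInsert = normalized.empty() ? doc : normalized;
        assert(toInsert == "{_id: ...} + {a: 1}");
        return 0;
    }
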
- WriteOpResult result;
-
- multiUpdate( _txn, updateItem, &result );
-
- if ( !result.getStats().upsertedID.isEmpty() ) {
- *upsertedId = result.getStats().upsertedID;
+void WriteBatchExecutor::execInserts(const BatchedCommandRequest& request,
+ std::vector<WriteErrorDetail*>* errors) {
+ // Theory of operation:
+ //
+ // Instantiates an ExecInsertsState, which represents all of the state involved in the batch
+ // insert execution algorithm. Most importantly, encapsulates the lock state.
+ //
+ // Every iteration of the loop in execInserts() processes one document insertion, by calling
+ // insertOne() exactly once for a given value of state.currIndex.
+ //
+ // If the ExecInsertsState indicates that the requisite write locks are not held, insertOne
+ // acquires them and performs lock-acquisition-time checks. However, on non-error
+ // execution, it does not release the locks. Therefore, the yielding logic in the while
+ // loop in execInserts() is solely responsible for lock release in the non-error case.
+ //
+ // Internally, insertOne loops performing the single insert until it completes without a
+ // PageFaultException, or until it fails with some kind of error. Errors are mostly
+ // propagated via the request->error field, but DBExceptions or std::exceptions may escape,
+ // particularly on operation interruption. These kinds of errors necessarily prevent
+ // further insertOne calls, and stop the batch. As a result, the only expected source of
+ // such exceptions are interruptions.
+ ExecInsertsState state(_txn, &request);
+ normalizeInserts(request, &state.normalizedInserts);
+
+ ShardedConnectionInfo* info = ShardedConnectionInfo::get(_txn->getClient(), false);
+ if (info) {
+ if (request.isMetadataSet() && request.getMetadata()->isShardVersionSet()) {
+ info->setVersion(request.getTargetingNS(), request.getMetadata()->getShardVersion());
+ } else {
+ info->setVersion(request.getTargetingNS(), ChunkVersion::IGNORED());
}
- // END CURRENT OP
- incWriteStats( updateItem, result.getStats(), result.getError(), &currentOp );
- finishCurrentOp(_txn, result.getError());
+ }
- // End current transaction and release snapshot.
- _txn->recoveryUnit()->abandonSnapshot();
+ // Yield frequency is based on the same constants used by PlanYieldPolicy.
+ ElapsedTracker elapsedTracker(internalQueryExecYieldIterations, internalQueryExecYieldPeriodMS);
- if ( result.getError() ) {
- result.getError()->setIndex( updateItem.getItemIndex() );
- *error = result.releaseError();
+ for (state.currIndex = 0; state.currIndex < state.request->sizeWriteOps(); ++state.currIndex) {
+ if (state.currIndex + 1 == state.request->sizeWriteOps()) {
+ setupSynchronousCommit(_txn);
}
- }
-
- void WriteBatchExecutor::execRemove( const BatchItemRef& removeItem,
- WriteErrorDetail** error ) {
- // Removes are similar to updates, but page faults are handled externally
+ if (elapsedTracker.intervalHasElapsed()) {
+ // Yield between inserts.
+ if (state.hasLock()) {
+ // Release our locks. They get reacquired when insertOne() calls
+ // ExecInsertsState::lockAndCheck(). Since the lock manager guarantees FIFO
+ // queues waiting on locks, there is no need to explicitly sleep or give up
+ // control of the processor here.
+ state.unlock();
+
+ // This releases any storage engine held locks/snapshots.
+ _txn->recoveryUnit()->abandonSnapshot();
+ }
- // BEGIN CURRENT OP
- CurOp currentOp(_txn);
- beginCurrentOp(_txn, removeItem);
- incOpStats( removeItem );
+ _txn->checkForInterrupt();
+ elapsedTracker.resetLastTime();
+ }
- ShardedConnectionInfo* info = ShardedConnectionInfo::get(_txn->getClient(), false);
- if (info) {
- auto rootRequest = removeItem.getRequest();
- if (removeItem.getDelete()->getLimit() == 1 &&
- rootRequest->isMetadataSet() &&
- rootRequest->getMetadata()->isShardVersionSet()) {
- info->setVersion(rootRequest->getTargetingNS(),
- rootRequest->getMetadata()->getShardVersion());
- }
- else {
- info->setVersion(rootRequest->getTargetingNS(), ChunkVersion::IGNORED());
- }
+ WriteErrorDetail* error = NULL;
+ execOneInsert(&state, &error);
+ if (error) {
+ errors->push_back(error);
+ error->setIndex(state.currIndex);
+ if (request.getOrdered())
+ return;
+ }
+ }
+}
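
The yield cadence in the loop above comes from ElapsedTracker, which fires when either an iteration budget or a wall-clock period is exceeded; internalQueryExecYieldIterations and internalQueryExecYieldPeriodMS are the same knobs the query system's yield policy uses (128 iterations / 10 ms by default in this era; treat those numbers as illustrative). A self-contained sketch of the dual-threshold tracker:

    #include <cassert>
    #include <chrono>

    // Minimal stand-in for ElapsedTracker: signals after N calls or after a
    // period of wall-clock time, whichever comes first.
    class ElapsedTrackerSketch {
    public:
        ElapsedTrackerSketch(int maxIterations, std::chrono::milliseconds period)
            : _maxIterations(maxIterations),
              _period(period),
              _last(std::chrono::steady_clock::now()) {}

        bool intervalHasElapsed() {
            return ++_calls >= _maxIterations ||
                std::chrono::steady_clock::now() - _last >= _period;
        }

        void resetLastTime() {
            _calls = 0;
            _last = std::chrono::steady_clock::now();
        }

    private:
        const int _maxIterations;
        const std::chrono::milliseconds _period;
        int _calls = 0;
        std::chrono::steady_clock::time_point _last;
    };

    int main() {
        ElapsedTrackerSketch tracker(3, std::chrono::milliseconds(1000));
        assert(!tracker.intervalHasElapsed());  // call 1
        assert(!tracker.intervalHasElapsed());  // call 2
        assert(tracker.intervalHasElapsed());   // call 3: iteration budget hit
        tracker.resetLastTime();                // caller yields locks, then resets
        assert(!tracker.intervalHasElapsed());
        return 0;
    }
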
+
+void WriteBatchExecutor::execUpdate(const BatchItemRef& updateItem,
+ BSONObj* upsertedId,
+ WriteErrorDetail** error) {
+ // BEGIN CURRENT OP
+ CurOp currentOp(_txn);
+ beginCurrentOp(_txn, updateItem);
+ incOpStats(updateItem);
+
+ ShardedConnectionInfo* info = ShardedConnectionInfo::get(_txn->getClient(), false);
+ if (info) {
+ auto rootRequest = updateItem.getRequest();
+ if (!updateItem.getUpdate()->getMulti() && rootRequest->isMetadataSet() &&
+ rootRequest->getMetadata()->isShardVersionSet()) {
+ info->setVersion(rootRequest->getTargetingNS(),
+ rootRequest->getMetadata()->getShardVersion());
+ } else {
+ info->setVersion(rootRequest->getTargetingNS(), ChunkVersion::IGNORED());
}
+ }
- WriteOpResult result;
+ WriteOpResult result;
- multiRemove( _txn, removeItem, &result );
+ multiUpdate(_txn, updateItem, &result);
- // END CURRENT OP
- incWriteStats( removeItem, result.getStats(), result.getError(), &currentOp );
- finishCurrentOp(_txn, result.getError());
+ if (!result.getStats().upsertedID.isEmpty()) {
+ *upsertedId = result.getStats().upsertedID;
+ }
+ // END CURRENT OP
+ incWriteStats(updateItem, result.getStats(), result.getError(), &currentOp);
+ finishCurrentOp(_txn, result.getError());
- // End current transaction and release snapshot.
- _txn->recoveryUnit()->abandonSnapshot();
+ // End current transaction and release snapshot.
+ _txn->recoveryUnit()->abandonSnapshot();
- if ( result.getError() ) {
- result.getError()->setIndex( removeItem.getItemIndex() );
- *error = result.releaseError();
+ if (result.getError()) {
+ result.getError()->setIndex(updateItem.getItemIndex());
+ *error = result.releaseError();
+ }
+}
+
+void WriteBatchExecutor::execRemove(const BatchItemRef& removeItem, WriteErrorDetail** error) {
+ // Removes are similar to updates, but page faults are handled externally
+
+ // BEGIN CURRENT OP
+ CurOp currentOp(_txn);
+ beginCurrentOp(_txn, removeItem);
+ incOpStats(removeItem);
+
+ ShardedConnectionInfo* info = ShardedConnectionInfo::get(_txn->getClient(), false);
+ if (info) {
+ auto rootRequest = removeItem.getRequest();
+ if (removeItem.getDelete()->getLimit() == 1 && rootRequest->isMetadataSet() &&
+ rootRequest->getMetadata()->isShardVersionSet()) {
+ info->setVersion(rootRequest->getTargetingNS(),
+ rootRequest->getMetadata()->getShardVersion());
+ } else {
+ info->setVersion(rootRequest->getTargetingNS(), ChunkVersion::IGNORED());
}
}
- //
- // IN-DB-LOCK CORE OPERATIONS
- //
+ WriteOpResult result;
+
+ multiRemove(_txn, removeItem, &result);
+
+ // END CURRENT OP
+ incWriteStats(removeItem, result.getStats(), result.getError(), &currentOp);
+ finishCurrentOp(_txn, result.getError());
+
+ // End current transaction and release snapshot.
+ _txn->recoveryUnit()->abandonSnapshot();
- WriteBatchExecutor::ExecInsertsState::ExecInsertsState(OperationContext* txn,
- const BatchedCommandRequest* aRequest) :
- txn(txn),
- request(aRequest),
- _transaction(txn, MODE_IX) {
+ if (result.getError()) {
+ result.getError()->setIndex(removeItem.getItemIndex());
+ *error = result.releaseError();
}
+}
- bool WriteBatchExecutor::ExecInsertsState::_lockAndCheckImpl(WriteOpResult* result,
- bool intentLock) {
- if (hasLock()) {
- CurOp::get(txn)->raiseDbProfileLevel(_database->getProfilingLevel());
- return true;
- }
+//
+// IN-DB-LOCK CORE OPERATIONS
+//
- if (request->isInsertIndexRequest())
- intentLock = false; // can't build indexes in intent mode
-
- const NamespaceString& nss = request->getNSS();
- invariant(!_collLock);
- invariant(!_dbLock);
- _dbLock = stdx::make_unique<Lock::DBLock>(txn->lockState(),
- nss.db(),
- intentLock ? MODE_IX : MODE_X);
- _database = dbHolder().get(txn, nss.ns());
- if (intentLock && !_database) {
- // Ensure exclusive lock in case the database doesn't yet exist
- _dbLock.reset();
- _dbLock = stdx::make_unique<Lock::DBLock>(txn->lockState(), nss.db(), MODE_X);
- intentLock = false;
- }
- _collLock = stdx::make_unique<Lock::CollectionLock>(txn->lockState(),
- nss.ns(),
- intentLock ? MODE_IX : MODE_X);
- if (!checkIsMasterForDatabase(nss, result)) {
- return false;
- }
- if (!checkShardVersion(txn, &shardingState, *request, result)) {
- return false;
- }
- if (!checkIndexConstraints(txn, &shardingState, *request, result)) {
- return false;
- }
+WriteBatchExecutor::ExecInsertsState::ExecInsertsState(OperationContext* txn,
+ const BatchedCommandRequest* aRequest)
+ : txn(txn), request(aRequest), _transaction(txn, MODE_IX) {}
- if (!_database) {
- invariant(!intentLock);
- _database = dbHolder().openDb(txn, nss.ns());
- }
+bool WriteBatchExecutor::ExecInsertsState::_lockAndCheckImpl(WriteOpResult* result,
+ bool intentLock) {
+ if (hasLock()) {
CurOp::get(txn)->raiseDbProfileLevel(_database->getProfilingLevel());
- _collection = _database->getCollection(request->getTargetingNS());
- if (!_collection) {
- if (intentLock) {
- // try again with full X lock.
- unlock();
- return _lockAndCheckImpl(result, false);
- }
-
- WriteUnitOfWork wunit (txn);
- // Implicitly create if it doesn't exist
- _collection = _database->createCollection(txn, request->getTargetingNS());
- if (!_collection) {
- result->setError(
- toWriteError(Status(ErrorCodes::InternalError,
- "could not create collection " +
- request->getTargetingNS())));
- return false;
- }
- wunit.commit();
- }
return true;
}
- bool WriteBatchExecutor::ExecInsertsState::lockAndCheck(WriteOpResult* result) {
- if (_lockAndCheckImpl(result, true))
- return true;
- unlock();
+ if (request->isInsertIndexRequest())
+ intentLock = false; // can't build indexes in intent mode
+
+ const NamespaceString& nss = request->getNSS();
+ invariant(!_collLock);
+ invariant(!_dbLock);
+ _dbLock =
+ stdx::make_unique<Lock::DBLock>(txn->lockState(), nss.db(), intentLock ? MODE_IX : MODE_X);
+ _database = dbHolder().get(txn, nss.ns());
+ if (intentLock && !_database) {
+ // Ensure exclusive lock in case the database doesn't yet exist
+ _dbLock.reset();
+ _dbLock = stdx::make_unique<Lock::DBLock>(txn->lockState(), nss.db(), MODE_X);
+ intentLock = false;
+ }
+ _collLock = stdx::make_unique<Lock::CollectionLock>(
+ txn->lockState(), nss.ns(), intentLock ? MODE_IX : MODE_X);
+ if (!checkIsMasterForDatabase(nss, result)) {
return false;
}
-
- void WriteBatchExecutor::ExecInsertsState::unlock() {
- _collection = nullptr;
- _database = nullptr;
- _collLock.reset();
- _dbLock.reset();
+ if (!checkShardVersion(txn, &shardingState, *request, result)) {
+ return false;
+ }
+ if (!checkIndexConstraints(txn, &shardingState, *request, result)) {
+ return false;
}
- static void insertOne(WriteBatchExecutor::ExecInsertsState* state, WriteOpResult* result) {
- // we have to be top level so we can retry
- invariant(!state->txn->lockState()->inAWriteUnitOfWork() );
- invariant(state->currIndex < state->normalizedInserts.size());
-
- const StatusWith<BSONObj>& normalizedInsert(state->normalizedInserts[state->currIndex]);
+ if (!_database) {
+ invariant(!intentLock);
+ _database = dbHolder().openDb(txn, nss.ns());
+ }
+ CurOp::get(txn)->raiseDbProfileLevel(_database->getProfilingLevel());
+ _collection = _database->getCollection(request->getTargetingNS());
+ if (!_collection) {
+ if (intentLock) {
+ // try again with full X lock.
+ unlock();
+ return _lockAndCheckImpl(result, false);
+ }
- if (!normalizedInsert.isOK()) {
- result->setError(toWriteError(normalizedInsert.getStatus()));
- return;
+ WriteUnitOfWork wunit(txn);
+ // Implicitly create if it doesn't exist
+ _collection = _database->createCollection(txn, request->getTargetingNS());
+ if (!_collection) {
+ result->setError(
+ toWriteError(Status(ErrorCodes::InternalError,
+ "could not create collection " + request->getTargetingNS())));
+ return false;
}
+ wunit.commit();
+ }
+ return true;
+}
- const BSONObj& insertDoc = normalizedInsert.getValue().isEmpty() ?
- state->request->getInsertRequest()->getDocumentsAt( state->currIndex ) :
- normalizedInsert.getValue();
+bool WriteBatchExecutor::ExecInsertsState::lockAndCheck(WriteOpResult* result) {
+ if (_lockAndCheckImpl(result, true))
+ return true;
+ unlock();
+ return false;
+}
+
+void WriteBatchExecutor::ExecInsertsState::unlock() {
+ _collection = nullptr;
+ _database = nullptr;
+ _collLock.reset();
+ _dbLock.reset();
+}
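
lockAndCheck escalates by taking intent (IX) locks optimistically, then fully releasing and retaking exclusive (X) locks when the database or collection has to be created. That shape can be sketched with std::shared_mutex standing in for the lock manager; it is a simplification, since a shared lock is stricter than a real intent lock (which admits concurrent writers), but the release-before-escalate pattern is the same:

    #include <cassert>
    #include <mutex>
    #include <optional>
    #include <shared_mutex>
    #include <string>

    std::shared_mutex dbMutex;              // stand-in for the DB lock
    std::optional<std::string> collection;  // empty: collection doesn't exist yet

    void lockAndInsertSketch(const std::string& doc) {
        {
            std::shared_lock<std::shared_mutex> intent(dbMutex);
            if (collection) {
                // fast path: perform the insert under the "intent" lock
                return;
            }
        }  // fully release before escalating; locks are never upgraded in place
        std::unique_lock<std::shared_mutex> exclusive(dbMutex);
        if (!collection)
            collection = "implicitly created";  // like the createCollection branch
        // ...insert doc under the exclusive lock...
        (void)doc;
    }

    int main() {
        lockAndInsertSketch("{a: 1}");
        assert(collection.has_value());
        return 0;
    }
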
+
+static void insertOne(WriteBatchExecutor::ExecInsertsState* state, WriteOpResult* result) {
+ // we have to be top level so we can retry
+ invariant(!state->txn->lockState()->inAWriteUnitOfWork());
+ invariant(state->currIndex < state->normalizedInserts.size());
+
+ const StatusWith<BSONObj>& normalizedInsert(state->normalizedInserts[state->currIndex]);
+
+ if (!normalizedInsert.isOK()) {
+ result->setError(toWriteError(normalizedInsert.getStatus()));
+ return;
+ }
- int attempt = 0;
- while (true) {
- try {
- if (!state->request->isInsertIndexRequest()) {
- if (state->lockAndCheck(result)) {
- singleInsert(state->txn, insertDoc, state->getCollection(), result);
- }
- }
- else {
- singleCreateIndex(state->txn, insertDoc, result);
+ const BSONObj& insertDoc = normalizedInsert.getValue().isEmpty()
+ ? state->request->getInsertRequest()->getDocumentsAt(state->currIndex)
+ : normalizedInsert.getValue();
+
+ int attempt = 0;
+ while (true) {
+ try {
+ if (!state->request->isInsertIndexRequest()) {
+ if (state->lockAndCheck(result)) {
+ singleInsert(state->txn, insertDoc, state->getCollection(), result);
}
- break;
- }
- catch ( const WriteConflictException& wce ) {
- state->unlock();
- CurOp::get(state->txn)->debug().writeConflicts++;
- state->txn->recoveryUnit()->abandonSnapshot();
- WriteConflictException::logAndBackoff( attempt++,
- "insert",
- state->getCollection() ?
- state->getCollection()->ns().ns() :
- "index" );
- }
- catch (const StaleConfigException& staleExcep) {
- result->setError(new WriteErrorDetail);
- result->getError()->setErrCode(ErrorCodes::StaleShardVersion);
- buildStaleError(staleExcep.getVersionReceived(),
- staleExcep.getVersionWanted(),
- result->getError());
- break;
+ } else {
+ singleCreateIndex(state->txn, insertDoc, result);
}
- catch (const DBException& ex) {
- Status status(ex.toStatus());
- if (ErrorCodes::isInterruption(status.code()))
- throw;
- result->setError(toWriteError(status));
- break;
- }
- }
-
- // Errors release the write lock, as a matter of policy.
- if (result->getError()) {
- state->txn->recoveryUnit()->abandonSnapshot();
+ break;
+ } catch (const WriteConflictException& wce) {
state->unlock();
+ CurOp::get(state->txn)->debug().writeConflicts++;
+ state->txn->recoveryUnit()->abandonSnapshot();
+ WriteConflictException::logAndBackoff(
+ attempt++,
+ "insert",
+ state->getCollection() ? state->getCollection()->ns().ns() : "index");
+ } catch (const StaleConfigException& staleExcep) {
+ result->setError(new WriteErrorDetail);
+ result->getError()->setErrCode(ErrorCodes::StaleShardVersion);
+ buildStaleError(
+ staleExcep.getVersionReceived(), staleExcep.getVersionWanted(), result->getError());
+ break;
+ } catch (const DBException& ex) {
+ Status status(ex.toStatus());
+ if (ErrorCodes::isInterruption(status.code()))
+ throw;
+ result->setError(toWriteError(status));
+ break;
}
}
- void WriteBatchExecutor::execOneInsert(ExecInsertsState* state, WriteErrorDetail** error) {
- BatchItemRef currInsertItem(state->request, state->currIndex);
- CurOp currentOp(_txn);
- beginCurrentOp(_txn, currInsertItem);
- incOpStats(currInsertItem);
+ // Errors release the write lock, as a matter of policy.
+ if (result->getError()) {
+ state->txn->recoveryUnit()->abandonSnapshot();
+ state->unlock();
+ }
+}
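
The WriteConflictException arm above is the storage engine's optimistic-concurrency retry loop: drop locks and the snapshot, back off, and retry from the top. A standalone rendering of that shape; the quadratic, capped backoff below is illustrative, not logAndBackoff's exact schedule:

    #include <algorithm>
    #include <chrono>
    #include <iostream>
    #include <stdexcept>
    #include <thread>

    struct WriteConflict : std::runtime_error {
        WriteConflict() : std::runtime_error("write conflict") {}
    };

    // Illustrative backoff: log, then sleep longer as attempts accumulate.
    void logAndBackoffSketch(int attempt) {
        std::cerr << "write conflict on insert, attempt " << attempt << ", retrying\n";
        const int ms = std::min(attempt * attempt, 100);
        std::this_thread::sleep_for(std::chrono::milliseconds(ms));
    }

    int main() {
        int attempt = 0;
        int conflictsLeft = 3;  // simulate three conflicting attempts
        while (true) {
            try {
                if (conflictsLeft-- > 0)
                    throw WriteConflict();
                break;  // success: the write unit of work committed
            } catch (const WriteConflict&) {
                // real code also unlocks and abandons the snapshot here
                logAndBackoffSketch(attempt++);
            }
        }
        std::cout << "insert succeeded after " << attempt << " retries\n";
        return 0;
    }
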
- WriteOpResult result;
- insertOne(state, &result);
+void WriteBatchExecutor::execOneInsert(ExecInsertsState* state, WriteErrorDetail** error) {
+ BatchItemRef currInsertItem(state->request, state->currIndex);
+ CurOp currentOp(_txn);
+ beginCurrentOp(_txn, currInsertItem);
+ incOpStats(currInsertItem);
- incWriteStats(currInsertItem,
- result.getStats(),
- result.getError(),
- &currentOp);
- finishCurrentOp(_txn, result.getError());
+ WriteOpResult result;
+ insertOne(state, &result);
- if (result.getError()) {
- *error = result.releaseError();
- }
- }
+ incWriteStats(currInsertItem, result.getStats(), result.getError(), &currentOp);
+ finishCurrentOp(_txn, result.getError());
- /**
- * Perform a single insert into a collection. Requires the insert be preprocessed and the
- * collection already has been created.
- *
- * Might fault or error, otherwise populates the result.
- */
- static void singleInsert( OperationContext* txn,
- const BSONObj& docToInsert,
- Collection* collection,
- WriteOpResult* result ) {
-
- const string& insertNS = collection->ns().ns();
- invariant(txn->lockState()->isCollectionLockedForMode(insertNS, MODE_IX));
+ if (result.getError()) {
+ *error = result.releaseError();
+ }
+}
- WriteUnitOfWork wunit(txn);
- StatusWith<RecordId> status = collection->insertDocument( txn, docToInsert, true );
+/**
+ * Perform a single insert into a collection. Requires the insert be preprocessed and the
+ * collection already has been created.
+ *
+ * Might fault or error, otherwise populates the result.
+ */
+static void singleInsert(OperationContext* txn,
+ const BSONObj& docToInsert,
+ Collection* collection,
+ WriteOpResult* result) {
+ const string& insertNS = collection->ns().ns();
+ invariant(txn->lockState()->isCollectionLockedForMode(insertNS, MODE_IX));
+
+ WriteUnitOfWork wunit(txn);
+ StatusWith<RecordId> status = collection->insertDocument(txn, docToInsert, true);
+
+ if (!status.isOK()) {
+ result->setError(toWriteError(status.getStatus()));
+ } else {
+ result->getStats().n = 1;
+ wunit.commit();
+ }
+}
- if ( !status.isOK() ) {
- result->setError(toWriteError(status.getStatus()));
- }
- else {
- result->getStats().n = 1;
- wunit.commit();
+/**
+ * Perform a single index creation on a collection. Requires the index descriptor be
+ * preprocessed.
+ *
+ * Might fault or error, otherwise populates the result.
+ */
+static void singleCreateIndex(OperationContext* txn,
+ const BSONObj& indexDesc,
+ WriteOpResult* result) {
+ BSONElement nsElement = indexDesc["ns"];
+ uassert(ErrorCodes::NoSuchKey, "Missing \"ns\" field in index description", !nsElement.eoo());
+ uassert(ErrorCodes::TypeMismatch,
+ str::stream() << "Expected \"ns\" field of index description to be a "
+ "string, "
+ "but found a " << typeName(nsElement.type()),
+ nsElement.type() == String);
+ const NamespaceString ns(nsElement.valueStringData());
+ BSONObjBuilder cmdBuilder;
+ cmdBuilder << "createIndexes" << ns.coll();
+ cmdBuilder << "indexes" << BSON_ARRAY(indexDesc);
+ BSONObj cmd = cmdBuilder.done();
+ Command* createIndexesCmd = Command::findCommand("createIndexes");
+ invariant(createIndexesCmd);
+ std::string errmsg;
+ BSONObjBuilder resultBuilder;
+ const bool success =
+ createIndexesCmd->run(txn, ns.db().toString(), cmd, 0, errmsg, resultBuilder);
+ Command::appendCommandStatus(resultBuilder, success, errmsg);
+ BSONObj cmdResult = resultBuilder.done();
+ uassertStatusOK(Command::getStatusFromCommandResult(cmdResult));
+ result->getStats().n =
+ cmdResult["numIndexesAfter"].numberInt() - cmdResult["numIndexesBefore"].numberInt();
+}
+
+static void multiUpdate(OperationContext* txn,
+ const BatchItemRef& updateItem,
+ WriteOpResult* result) {
+ const NamespaceString nsString(updateItem.getRequest()->getNS());
+ const bool isMulti = updateItem.getUpdate()->getMulti();
+ UpdateRequest request(nsString);
+ request.setQuery(updateItem.getUpdate()->getQuery());
+ request.setUpdates(updateItem.getUpdate()->getUpdateExpr());
+ request.setMulti(isMulti);
+ request.setUpsert(updateItem.getUpdate()->getUpsert());
+ UpdateLifecycleImpl updateLifecycle(true, request.getNamespaceString());
+ request.setLifecycle(&updateLifecycle);
+
+ // Updates from the write commands path can yield.
+ request.setYieldPolicy(PlanExecutor::YIELD_AUTO);
+
+ int attempt = 0;
+ bool createCollection = false;
+ for (int fakeLoop = 0; fakeLoop < 1; fakeLoop++) {
+ ParsedUpdate parsedUpdate(txn, &request);
+ Status status = parsedUpdate.parseRequest();
+ if (!status.isOK()) {
+ result->setError(toWriteError(status));
+ return;
}
- }
- /**
- * Perform a single index creation on a collection. Requires the index descriptor be
- * preprocessed.
- *
- * Might fault or error, otherwise populates the result.
- */
- static void singleCreateIndex(OperationContext* txn,
- const BSONObj& indexDesc,
- WriteOpResult* result) {
+ if (createCollection) {
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ const AutoGetOrCreateDb adb{txn, nsString.db(), MODE_X};
- BSONElement nsElement = indexDesc["ns"];
- uassert(ErrorCodes::NoSuchKey,
- "Missing \"ns\" field in index description",
- !nsElement.eoo());
- uassert(ErrorCodes::TypeMismatch,
- str::stream() << "Expected \"ns\" field of index description to be a " "string, "
- "but found a " << typeName(nsElement.type()),
- nsElement.type() == String);
- const NamespaceString ns(nsElement.valueStringData());
- BSONObjBuilder cmdBuilder;
- cmdBuilder << "createIndexes" << ns.coll();
- cmdBuilder << "indexes" << BSON_ARRAY(indexDesc);
- BSONObj cmd = cmdBuilder.done();
- Command* createIndexesCmd = Command::findCommand("createIndexes");
- invariant(createIndexesCmd);
- std::string errmsg;
- BSONObjBuilder resultBuilder;
- const bool success = createIndexesCmd->run(
- txn,
- ns.db().toString(),
- cmd,
- 0,
- errmsg,
- resultBuilder);
- Command::appendCommandStatus(resultBuilder, success, errmsg);
- BSONObj cmdResult = resultBuilder.done();
- uassertStatusOK(Command::getStatusFromCommandResult(cmdResult));
- result->getStats().n =
- cmdResult["numIndexesAfter"].numberInt() - cmdResult["numIndexesBefore"].numberInt();
- }
+ if (!checkIsMasterForDatabase(nsString, result)) {
+ return;
+ }
- static void multiUpdate( OperationContext* txn,
- const BatchItemRef& updateItem,
- WriteOpResult* result ) {
-
- const NamespaceString nsString(updateItem.getRequest()->getNS());
- const bool isMulti = updateItem.getUpdate()->getMulti();
- UpdateRequest request(nsString);
- request.setQuery(updateItem.getUpdate()->getQuery());
- request.setUpdates(updateItem.getUpdate()->getUpdateExpr());
- request.setMulti(isMulti);
- request.setUpsert(updateItem.getUpdate()->getUpsert());
- UpdateLifecycleImpl updateLifecycle(true, request.getNamespaceString());
- request.setLifecycle(&updateLifecycle);
-
- // Updates from the write commands path can yield.
- request.setYieldPolicy(PlanExecutor::YIELD_AUTO);
-
- int attempt = 0;
- bool createCollection = false;
- for ( int fakeLoop = 0; fakeLoop < 1; fakeLoop++ ) {
-
- ParsedUpdate parsedUpdate(txn, &request);
- Status status = parsedUpdate.parseRequest();
- if (!status.isOK()) {
- result->setError(toWriteError(status));
- return;
+ Database* const db = adb.getDb();
+ if (db->getCollection(nsString.ns())) {
+ // someone else beat us to it
+ } else {
+ WriteUnitOfWork wuow(txn);
+ uassertStatusOK(userCreateNS(txn, db, nsString.ns(), BSONObj()));
+ wuow.commit();
+ }
}
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "update", nsString.ns());
+ }
- if ( createCollection ) {
- MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- const AutoGetOrCreateDb adb{txn, nsString.db(), MODE_X};
+ ///////////////////////////////////////////
+ ScopedTransaction transaction(txn, MODE_IX);
+ Lock::DBLock dbLock(txn->lockState(), nsString.db(), MODE_IX);
+ Lock::CollectionLock colLock(
+ txn->lockState(), nsString.ns(), parsedUpdate.isIsolated() ? MODE_X : MODE_IX);
+ ///////////////////////////////////////////
- if (!checkIsMasterForDatabase(nsString, result)) {
- return;
- }
+ if (!checkIsMasterForDatabase(nsString, result)) {
+ return;
+ }
- Database* const db = adb.getDb();
- if ( db->getCollection( nsString.ns() ) ) {
- // someone else beat us to it
- }
- else {
- WriteUnitOfWork wuow(txn);
- uassertStatusOK(userCreateNS(txn, db, nsString.ns(), BSONObj()));
- wuow.commit();
- }
- } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "update", nsString.ns());
- }
+ if (!checkShardVersion(txn, &shardingState, *updateItem.getRequest(), result))
+ return;
- ///////////////////////////////////////////
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock dbLock(txn->lockState(), nsString.db(), MODE_IX);
- Lock::CollectionLock colLock(txn->lockState(),
- nsString.ns(),
- parsedUpdate.isIsolated() ? MODE_X : MODE_IX);
- ///////////////////////////////////////////
+ Database* const db = dbHolder().get(txn, nsString.db());
- if (!checkIsMasterForDatabase(nsString, result)) {
+ if (db == NULL) {
+ if (createCollection) {
+                // we raced with someone, accept defeat
+ result->getStats().nModified = 0;
+ result->getStats().n = 0;
return;
}
- if (!checkShardVersion(txn, &shardingState, *updateItem.getRequest(), result))
+ // Database not yet created
+ if (!request.isUpsert()) {
+ // not an upsert, no database, nothing to do
+ result->getStats().nModified = 0;
+ result->getStats().n = 0;
return;
-
- Database* const db = dbHolder().get(txn, nsString.db());
-
- if (db == NULL) {
- if (createCollection) {
-                    // we raced with someone, accept defeat
- result->getStats().nModified = 0;
- result->getStats().n = 0;
- return;
- }
-
- // Database not yet created
- if (!request.isUpsert()) {
- // not an upsert, no database, nothing to do
- result->getStats().nModified = 0;
- result->getStats().n = 0;
- return;
- }
-
- // upsert, don't try to get a context as no MODE_X lock is held
- fakeLoop = -1;
- createCollection = true;
- continue;
}
- CurOp::get(txn)->raiseDbProfileLevel(db->getProfilingLevel());
- Collection* collection = db->getCollection(nsString.ns());
-
- if ( collection == NULL ) {
- if ( createCollection ) {
-                    // we raced with someone, accept defeat
- result->getStats().nModified = 0;
- result->getStats().n = 0;
- return;
- }
+ // upsert, don't try to get a context as no MODE_X lock is held
+ fakeLoop = -1;
+ createCollection = true;
+ continue;
+ }
- if ( !request.isUpsert() ) {
- // not an upsert, no collection, nothing to do
- result->getStats().nModified = 0;
- result->getStats().n = 0;
- return;
- }
+ CurOp::get(txn)->raiseDbProfileLevel(db->getProfilingLevel());
+ Collection* collection = db->getCollection(nsString.ns());
- // upsert, mark that we should create collection
- fakeLoop = -1;
- createCollection = true;
- continue;
+ if (collection == NULL) {
+ if (createCollection) {
+                // we raced with someone, accept defeat
+ result->getStats().nModified = 0;
+ result->getStats().n = 0;
+ return;
}
- OpDebug* debug = &CurOp::get(txn)->debug();
+ if (!request.isUpsert()) {
+ // not an upsert, no collection, nothing to do
+ result->getStats().nModified = 0;
+ result->getStats().n = 0;
+ return;
+ }
- try {
- invariant(collection);
- PlanExecutor* rawExec;
- uassertStatusOK(getExecutorUpdate(txn, collection, &parsedUpdate, debug, &rawExec));
- std::unique_ptr<PlanExecutor> exec(rawExec);
+ // upsert, mark that we should create collection
+ fakeLoop = -1;
+ createCollection = true;
+ continue;
+ }
- uassertStatusOK(exec->executePlan());
- UpdateResult res = UpdateStage::makeUpdateResult(exec.get(), debug);
+ OpDebug* debug = &CurOp::get(txn)->debug();
- const long long numDocsModified = res.numDocsModified;
- const long long numMatched = res.numMatched;
- const BSONObj resUpsertedID = res.upserted;
+ try {
+ invariant(collection);
+ PlanExecutor* rawExec;
+ uassertStatusOK(getExecutorUpdate(txn, collection, &parsedUpdate, debug, &rawExec));
+ std::unique_ptr<PlanExecutor> exec(rawExec);
- // We have an _id from an insert
- const bool didInsert = !resUpsertedID.isEmpty();
+ uassertStatusOK(exec->executePlan());
+ UpdateResult res = UpdateStage::makeUpdateResult(exec.get(), debug);
- result->getStats().nModified = didInsert ? 0 : numDocsModified;
- result->getStats().n = didInsert ? 1 : numMatched;
- result->getStats().upsertedID = resUpsertedID;
- }
- catch ( const WriteConflictException& dle ) {
- debug->writeConflicts++;
- if ( isMulti ) {
- log() << "Had WriteConflict during multi update, aborting";
- throw;
- }
+ const long long numDocsModified = res.numDocsModified;
+ const long long numMatched = res.numMatched;
+ const BSONObj resUpsertedID = res.upserted;
- createCollection = false;
- // RESTART LOOP
- fakeLoop = -1;
- txn->recoveryUnit()->abandonSnapshot();
+ // We have an _id from an insert
+ const bool didInsert = !resUpsertedID.isEmpty();
- WriteConflictException::logAndBackoff( attempt++, "update", nsString.ns() );
- }
- catch (const StaleConfigException& staleExcep) {
- result->setError(new WriteErrorDetail);
- result->getError()->setErrCode(ErrorCodes::StaleShardVersion);
- buildStaleError(staleExcep.getVersionReceived(),
- staleExcep.getVersionWanted(),
- result->getError());
+ result->getStats().nModified = didInsert ? 0 : numDocsModified;
+ result->getStats().n = didInsert ? 1 : numMatched;
+ result->getStats().upsertedID = resUpsertedID;
+ } catch (const WriteConflictException& dle) {
+ debug->writeConflicts++;
+ if (isMulti) {
+ log() << "Had WriteConflict during multi update, aborting";
+ throw;
}
- catch (const DBException& ex) {
- Status status = ex.toStatus();
- if (ErrorCodes::isInterruption(status.code())) {
- throw;
- }
- result->setError(toWriteError(status));
+
+ createCollection = false;
+ // RESTART LOOP
+ fakeLoop = -1;
+ txn->recoveryUnit()->abandonSnapshot();
+
+ WriteConflictException::logAndBackoff(attempt++, "update", nsString.ns());
+ } catch (const StaleConfigException& staleExcep) {
+ result->setError(new WriteErrorDetail);
+ result->getError()->setErrCode(ErrorCodes::StaleShardVersion);
+ buildStaleError(
+ staleExcep.getVersionReceived(), staleExcep.getVersionWanted(), result->getError());
+ } catch (const DBException& ex) {
+ Status status = ex.toStatus();
+ if (ErrorCodes::isInterruption(status.code())) {
+ throw;
}
+ result->setError(toWriteError(status));
}
}
+}
- /**
- * Perform a remove operation, which might remove multiple documents. Dispatches to remove code
- * currently to do most of this.
- *
- * Might fault or error, otherwise populates the result.
- */
- static void multiRemove( OperationContext* txn,
- const BatchItemRef& removeItem,
- WriteOpResult* result ) {
-
- const NamespaceString& nss = removeItem.getRequest()->getNSS();
- DeleteRequest request(nss);
- request.setQuery( removeItem.getDelete()->getQuery() );
- request.setMulti( removeItem.getDelete()->getLimit() != 1 );
- request.setGod( false );
-
- // Deletes running through the write commands path can yield.
- request.setYieldPolicy(PlanExecutor::YIELD_AUTO);
-
- int attempt = 1;
- while ( 1 ) {
- try {
-
- ParsedDelete parsedDelete(txn, &request);
- Status status = parsedDelete.parseRequest();
- if (!status.isOK()) {
- result->setError(toWriteError(status));
- return;
- }
-
- ScopedTransaction scopedXact(txn, MODE_IX);
- AutoGetDb autoDb(txn, nss.db(), MODE_IX);
- if (!autoDb.getDb()) {
- break;
- }
-
- CurOp::get(txn)->raiseDbProfileLevel(autoDb.getDb()->getProfilingLevel());
- Lock::CollectionLock collLock(txn->lockState(),
- nss.ns(),
- parsedDelete.isIsolated() ? MODE_X : MODE_IX);
-
- // getExecutorDelete() also checks if writes are allowed.
- if (!checkIsMasterForDatabase(nss, result)) {
- return;
- }
- // Check version once we're locked
-
- if (!checkShardVersion(txn, &shardingState, *removeItem.getRequest(), result)) {
- // Version error
- return;
- }
-
- PlanExecutor* rawExec;
- uassertStatusOK(getExecutorDelete(txn,
- autoDb.getDb()->getCollection(nss),
- &parsedDelete,
- &rawExec));
- std::unique_ptr<PlanExecutor> exec(rawExec);
-
- // Execute the delete and retrieve the number deleted.
- uassertStatusOK(exec->executePlan());
- result->getStats().n = DeleteStage::getNumDeleted(exec.get());
+/**
+ * Performs a remove operation, which might remove multiple documents. Currently
+ * dispatches to the remove code to do most of the work.
+ *
+ * Might fault or error; otherwise populates the result.
+ */
+static void multiRemove(OperationContext* txn,
+ const BatchItemRef& removeItem,
+ WriteOpResult* result) {
+ const NamespaceString& nss = removeItem.getRequest()->getNSS();
+ DeleteRequest request(nss);
+ request.setQuery(removeItem.getDelete()->getQuery());
+ request.setMulti(removeItem.getDelete()->getLimit() != 1);
+ request.setGod(false);
+
+ // Deletes running through the write commands path can yield.
+ request.setYieldPolicy(PlanExecutor::YIELD_AUTO);
+
+ int attempt = 1;
+ while (1) {
+ try {
+ ParsedDelete parsedDelete(txn, &request);
+ Status status = parsedDelete.parseRequest();
+ if (!status.isOK()) {
+ result->setError(toWriteError(status));
+ return;
+ }
+ ScopedTransaction scopedXact(txn, MODE_IX);
+ AutoGetDb autoDb(txn, nss.db(), MODE_IX);
+ if (!autoDb.getDb()) {
break;
}
- catch ( const WriteConflictException& dle ) {
- CurOp::get(txn)->debug().writeConflicts++;
- WriteConflictException::logAndBackoff( attempt++, "delete", nss.ns() );
- }
- catch (const StaleConfigException& staleExcep) {
- result->setError(new WriteErrorDetail);
- result->getError()->setErrCode(ErrorCodes::StaleShardVersion);
- buildStaleError(staleExcep.getVersionReceived(),
- staleExcep.getVersionWanted(),
- result->getError());
+
+ CurOp::get(txn)->raiseDbProfileLevel(autoDb.getDb()->getProfilingLevel());
+ Lock::CollectionLock collLock(
+ txn->lockState(), nss.ns(), parsedDelete.isIsolated() ? MODE_X : MODE_IX);
+
+ // getExecutorDelete() also checks if writes are allowed.
+ if (!checkIsMasterForDatabase(nss, result)) {
return;
}
- catch ( const DBException& ex ) {
- Status status = ex.toStatus();
- if (ErrorCodes::isInterruption(status.code())) {
- throw;
- }
- result->setError(toWriteError(status));
+ // Check version once we're locked
+
+ if (!checkShardVersion(txn, &shardingState, *removeItem.getRequest(), result)) {
+ // Version error
return;
}
+
+ PlanExecutor* rawExec;
+ uassertStatusOK(getExecutorDelete(
+ txn, autoDb.getDb()->getCollection(nss), &parsedDelete, &rawExec));
+ std::unique_ptr<PlanExecutor> exec(rawExec);
+
+ // Execute the delete and retrieve the number deleted.
+ uassertStatusOK(exec->executePlan());
+ result->getStats().n = DeleteStage::getNumDeleted(exec.get());
+
+ break;
+ } catch (const WriteConflictException& dle) {
+ CurOp::get(txn)->debug().writeConflicts++;
+ WriteConflictException::logAndBackoff(attempt++, "delete", nss.ns());
+ } catch (const StaleConfigException& staleExcep) {
+ result->setError(new WriteErrorDetail);
+ result->getError()->setErrCode(ErrorCodes::StaleShardVersion);
+ buildStaleError(
+ staleExcep.getVersionReceived(), staleExcep.getVersionWanted(), result->getError());
+ return;
+ } catch (const DBException& ex) {
+ Status status = ex.toStatus();
+ if (ErrorCodes::isInterruption(status.code())) {
+ throw;
+ }
+ result->setError(toWriteError(status));
+ return;
}
}
+}
-} // namespace mongo
+} // namespace mongo
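
The multiUpdate and multiRemove loops above share one retry idiom: on a
WriteConflictException the snapshot is abandoned and the loop restarts after a
backoff, while any other (non-interruption) error is recorded on the result and
the loop exits. A minimal standalone C++ sketch of that shape follows; the names
FakeWriteConflict, attemptWrite, and backoff are illustrative stand-ins, not
MongoDB internals.

#include <chrono>
#include <iostream>
#include <stdexcept>
#include <thread>

struct FakeWriteConflict : std::runtime_error {
    FakeWriteConflict() : std::runtime_error("write conflict") {}
};

// Pretend write that conflicts on the first two attempts.
bool attemptWrite(int attempt) {
    if (attempt < 3)
        throw FakeWriteConflict();
    return true;
}

void backoff(int attempt) {
    // Crude linear backoff, standing in for WriteConflictException::logAndBackoff().
    std::this_thread::sleep_for(std::chrono::milliseconds(10 * attempt));
}

int main() {
    int attempt = 1;
    while (true) {
        try {
            attemptWrite(attempt);
            std::cout << "write succeeded on attempt " << attempt << "\n";
            break;  // mirrors the break after executePlan() succeeds
        } catch (const FakeWriteConflict&) {
            backoff(attempt++);  // mirrors logAndBackoff() and the loop restart
        } catch (const std::exception& ex) {
            std::cerr << "unrecoverable error: " << ex.what() << "\n";
            return 1;  // mirrors result->setError(...) followed by return
        }
    }
    return 0;
}
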
diff --git a/src/mongo/db/commands/write_commands/batch_executor.h b/src/mongo/db/commands/write_commands/batch_executor.h
index 0bab41d3ff8..0dd1d71848a 100644
--- a/src/mongo/db/commands/write_commands/batch_executor.h
+++ b/src/mongo/db/commands/write_commands/batch_executor.h
@@ -40,158 +40,148 @@
namespace mongo {
- class BSONObjBuilder;
- class CurOp;
- class LastError;
- class OpCounters;
- class OperationContext;
- class WriteBatchStats;
- struct WriteOpStats;
+class BSONObjBuilder;
+class CurOp;
+class LastError;
+class OpCounters;
+class OperationContext;
+class WriteBatchStats;
+struct WriteOpStats;
+
+/**
+ * An instance of WriteBatchExecutor is an object capable of issuing a write batch.
+ */
+class WriteBatchExecutor {
+ MONGO_DISALLOW_COPYING(WriteBatchExecutor);
+
+public:
+ // State object used by private execInserts. TODO: Do not expose this type.
+ class ExecInsertsState;
+
+ WriteBatchExecutor(OperationContext* txn, OpCounters* opCounters, LastError* le);
/**
- * An instance of WriteBatchExecutor is an object capable of issuing a write batch.
+ * Issues writes with requested write concern. Fills response with errors if problems
+ * occur.
*/
- class WriteBatchExecutor {
- MONGO_DISALLOW_COPYING(WriteBatchExecutor);
- public:
-
- // State object used by private execInserts. TODO: Do not expose this type.
- class ExecInsertsState;
-
- WriteBatchExecutor( OperationContext* txn,
- OpCounters* opCounters,
- LastError* le );
-
- /**
- * Issues writes with requested write concern. Fills response with errors if problems
- * occur.
- */
- void executeBatch( const BatchedCommandRequest& request, BatchedCommandResponse* response );
-
- const WriteBatchStats& getStats() const;
-
- /**
- * Does basic validation of the batch request. Returns a non-OK status if
- * any problems with the batch are found.
- */
- static Status validateBatch( const BatchedCommandRequest& request );
-
- private:
- /**
- * Executes the writes in the batch and returns upserted _ids and write errors.
- * Dispatches to one of the three functions below for DBLock, CurOp, and stats management.
- */
- void bulkExecute( const BatchedCommandRequest& request,
- std::vector<BatchedUpsertDetail*>* upsertedIds,
- std::vector<WriteErrorDetail*>* errors );
-
- /**
- * Executes the inserts of an insert batch and returns the write errors.
- *
- * Internally uses the DBLock of the request namespace.
- * May execute multiple inserts inside the same DBLock, and/or take the DBLock multiple
- * times.
- */
- void execInserts( const BatchedCommandRequest& request,
- std::vector<WriteErrorDetail*>* errors );
-
- /**
- * Executes a single insert from a batch, described in the opaque "state" object.
- */
- void execOneInsert( ExecInsertsState* state, WriteErrorDetail** error );
-
- /**
- * Executes an update item (which may update many documents or upsert), and returns the
- * upserted _id on upsert or error on failure.
- *
- * Internally uses the DBLock of the update namespace.
- * May take the DBLock multiple times.
- */
- void execUpdate( const BatchItemRef& updateItem,
- BSONObj* upsertedId,
- WriteErrorDetail** error );
-
- /**
- * Executes a delete item (which may remove many documents) and returns an error on failure.
- *
- * Internally uses the DBLock of the delete namespace.
- * May take the DBLock multiple times.
- */
- void execRemove( const BatchItemRef& removeItem, WriteErrorDetail** error );
-
- /**
- * Helper for incrementing stats on the next CurOp.
- *
- * No lock requirements.
- */
- void incOpStats( const BatchItemRef& currWrite );
-
- /**
- * Helper for incrementing stats after each individual write op.
- *
- * No lock requirements (though usually done inside write lock to make stats update look
- * atomic).
- */
- void incWriteStats( const BatchItemRef& currWrite,
- const WriteOpStats& stats,
- const WriteErrorDetail* error,
- CurOp* currentOp );
-
- OperationContext* _txn;
-
- // OpCounters object to update - needed for stats reporting
- // Not owned here.
- OpCounters* _opCounters;
-
- // LastError object to use for preparing write results - needed for stats reporting
- // Not owned here.
- LastError* _le;
-
- // Stats
- std::unique_ptr<WriteBatchStats> _stats;
- };
+ void executeBatch(const BatchedCommandRequest& request, BatchedCommandResponse* response);
+
+ const WriteBatchStats& getStats() const;
/**
- * Holds information about the result of a single write operation.
+ * Does basic validation of the batch request. Returns a non-OK status if
+ * any problems with the batch are found.
*/
- struct WriteOpStats {
+ static Status validateBatch(const BatchedCommandRequest& request);
- WriteOpStats() :
- n( 0 ), nModified( 0 ) {
- }
+private:
+ /**
+ * Executes the writes in the batch and returns upserted _ids and write errors.
+ * Dispatches to one of the three functions below for DBLock, CurOp, and stats management.
+ */
+ void bulkExecute(const BatchedCommandRequest& request,
+ std::vector<BatchedUpsertDetail*>* upsertedIds,
+ std::vector<WriteErrorDetail*>* errors);
- void reset() {
- n = 0;
- nModified = 0;
- upsertedID = BSONObj();
- }
+ /**
+ * Executes the inserts of an insert batch and returns the write errors.
+ *
+ * Internally uses the DBLock of the request namespace.
+ * May execute multiple inserts inside the same DBLock, and/or take the DBLock multiple
+ * times.
+ */
+ void execInserts(const BatchedCommandRequest& request, std::vector<WriteErrorDetail*>* errors);
- // Num docs logically affected by this operation.
- int n;
+ /**
+ * Executes a single insert from a batch, described in the opaque "state" object.
+ */
+ void execOneInsert(ExecInsertsState* state, WriteErrorDetail** error);
+
+ /**
+ * Executes an update item (which may update many documents or upsert), and returns the
+ * upserted _id on upsert or error on failure.
+ *
+ * Internally uses the DBLock of the update namespace.
+ * May take the DBLock multiple times.
+ */
+ void execUpdate(const BatchItemRef& updateItem, BSONObj* upsertedId, WriteErrorDetail** error);
- // Num docs actually modified by this operation, if applicable (update)
- int nModified;
+ /**
+ * Executes a delete item (which may remove many documents) and returns an error on failure.
+ *
+ * Internally uses the DBLock of the delete namespace.
+ * May take the DBLock multiple times.
+ */
+ void execRemove(const BatchItemRef& removeItem, WriteErrorDetail** error);
- // _id of newly upserted document, if applicable (update)
- BSONObj upsertedID;
- };
+ /**
+ * Helper for incrementing stats on the next CurOp.
+ *
+ * No lock requirements.
+ */
+ void incOpStats(const BatchItemRef& currWrite);
/**
- * Full stats accumulated by a write batch execution. Note that these stats do not directly
- * correspond to the stats accumulated in opCounters and LastError.
+ * Helper for incrementing stats after each individual write op.
+ *
+     * No lock requirements (though usually done inside a write lock to make the stats
+     * update look atomic).
*/
- class WriteBatchStats {
- public:
+ void incWriteStats(const BatchItemRef& currWrite,
+ const WriteOpStats& stats,
+ const WriteErrorDetail* error,
+ CurOp* currentOp);
- WriteBatchStats() :
- numInserted( 0 ), numUpserted( 0 ), numMatched( 0 ), numModified( 0 ), numDeleted( 0 ) {
- }
+ OperationContext* _txn;
- int numInserted;
- int numUpserted;
- int numMatched;
- int numModified;
- int numDeleted;
- };
+ // OpCounters object to update - needed for stats reporting
+ // Not owned here.
+ OpCounters* _opCounters;
-} // namespace mongo
+ // LastError object to use for preparing write results - needed for stats reporting
+ // Not owned here.
+ LastError* _le;
+
+ // Stats
+ std::unique_ptr<WriteBatchStats> _stats;
+};
+
+/**
+ * Holds information about the result of a single write operation.
+ */
+struct WriteOpStats {
+ WriteOpStats() : n(0), nModified(0) {}
+
+ void reset() {
+ n = 0;
+ nModified = 0;
+ upsertedID = BSONObj();
+ }
+
+ // Num docs logically affected by this operation.
+ int n;
+
+ // Num docs actually modified by this operation, if applicable (update)
+ int nModified;
+
+ // _id of newly upserted document, if applicable (update)
+ BSONObj upsertedID;
+};
+
+/**
+ * Full stats accumulated by a write batch execution. Note that these stats do not directly
+ * correspond to the stats accumulated in opCounters and LastError.
+ */
+class WriteBatchStats {
+public:
+ WriteBatchStats()
+ : numInserted(0), numUpserted(0), numMatched(0), numModified(0), numDeleted(0) {}
+
+ int numInserted;
+ int numUpserted;
+ int numMatched;
+ int numModified;
+ int numDeleted;
+};
+
+} // namespace mongo
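
The WriteOpStats fields declared above follow the accounting visible in
multiUpdate: an upsert that inserts a document reports n = 1 and nModified = 0,
while a plain update reports the matched and modified counts. Below is a
standalone sketch of that rule, with a plain struct and a string standing in
for the real WriteOpStats and the upserted _id BSONObj.

#include <cassert>
#include <string>

struct StatsSketch {
    int n = 0;               // docs logically affected
    int nModified = 0;       // docs actually modified (updates only)
    std::string upsertedID;  // stand-in for the upserted _id
};

StatsSketch recordUpdateResult(long long numMatched,
                               long long numDocsModified,
                               const std::string& upsertedID) {
    StatsSketch s;
    const bool didInsert = !upsertedID.empty();  // a returned _id means the upsert inserted
    s.nModified = didInsert ? 0 : static_cast<int>(numDocsModified);
    s.n = didInsert ? 1 : static_cast<int>(numMatched);
    s.upsertedID = upsertedID;
    return s;
}

int main() {
    // Plain multi-update: 3 matched, 2 actually changed.
    StatsSketch plain = recordUpdateResult(3, 2, "");
    assert(plain.n == 3 && plain.nModified == 2);

    // Upsert that inserted a new document.
    StatsSketch up = recordUpdateResult(0, 0, "newObjectId");
    assert(up.n == 1 && up.nModified == 0);
    return 0;
}
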
diff --git a/src/mongo/db/commands/write_commands/write_commands.cpp b/src/mongo/db/commands/write_commands/write_commands.cpp
index 4bf374778fb..fe6bb9a2ff9 100644
--- a/src/mongo/db/commands/write_commands/write_commands.cpp
+++ b/src/mongo/db/commands/write_commands/write_commands.cpp
@@ -53,270 +53,265 @@
namespace mongo {
- using std::string;
- using std::stringstream;
+using std::string;
+using std::stringstream;
- namespace {
+namespace {
- MONGO_INITIALIZER(RegisterWriteCommands)(InitializerContext* context) {
- // Leaked intentionally: a Command registers itself when constructed.
- new CmdInsert();
- new CmdUpdate();
- new CmdDelete();
- return Status::OK();
- }
-
- } // namespace
+MONGO_INITIALIZER(RegisterWriteCommands)(InitializerContext* context) {
+ // Leaked intentionally: a Command registers itself when constructed.
+ new CmdInsert();
+ new CmdUpdate();
+ new CmdDelete();
+ return Status::OK();
+}
- WriteCmd::WriteCmd( StringData name, BatchedCommandRequest::BatchType writeType ) :
- Command( name ), _writeType( writeType ) {
- }
+} // namespace
- void WriteCmd::redactTooLongLog( mutablebson::Document* cmdObj, StringData fieldName ) {
- namespace mmb = mutablebson;
- mmb::Element root = cmdObj->root();
- mmb::Element field = root.findFirstChildNamed( fieldName );
+WriteCmd::WriteCmd(StringData name, BatchedCommandRequest::BatchType writeType)
+ : Command(name), _writeType(writeType) {}
- // If the cmdObj is too large, it will be a "too big" message given by CachedBSONObj.get()
- if ( !field.ok() ) {
- return;
- }
+void WriteCmd::redactTooLongLog(mutablebson::Document* cmdObj, StringData fieldName) {
+ namespace mmb = mutablebson;
+ mmb::Element root = cmdObj->root();
+ mmb::Element field = root.findFirstChildNamed(fieldName);
- // Redact the log if there are more than one documents or operations.
- if ( field.countChildren() > 1 ) {
- field.setValueInt( field.countChildren() );
- }
+ // If the cmdObj is too large, it will be a "too big" message given by CachedBSONObj.get()
+ if (!field.ok()) {
+ return;
}
- // Slaves can't perform writes.
- bool WriteCmd::slaveOk() const { return false; }
+    // Redact the log if there is more than one document or operation.
+ if (field.countChildren() > 1) {
+ field.setValueInt(field.countChildren());
+ }
+}
+
+// Slaves can't perform writes.
+bool WriteCmd::slaveOk() const {
+ return false;
+}
+
+bool WriteCmd::isWriteCommandForConfigServer() const {
+ return false;
+}
+
+Status WriteCmd::checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ Status status(auth::checkAuthForWriteCommand(AuthorizationSession::get(client),
+ _writeType,
+ NamespaceString(parseNs(dbname, cmdObj)),
+ cmdObj));
+
+ // TODO: Remove this when we standardize GLE reporting from commands
+ if (!status.isOK()) {
+ LastError::get(client).setLastError(status.code(), status.reason());
+ }
- bool WriteCmd::isWriteCommandForConfigServer() const { return false; }
+ return status;
+}
+
+// Write commands are counted towards their corresponding opcounters, not command opcounters.
+bool WriteCmd::shouldAffectCommandCounter() const {
+ return false;
+}
+
+bool WriteCmd::run(OperationContext* txn,
+ const string& dbName,
+ BSONObj& cmdObj,
+ int options,
+ string& errMsg,
+ BSONObjBuilder& result) {
+ // Can't be run on secondaries.
+ dassert(txn->writesAreReplicated());
+ BatchedCommandRequest request(_writeType);
+ BatchedCommandResponse response;
+
+ if (!request.parseBSON(cmdObj, &errMsg) || !request.isValid(&errMsg)) {
+ return appendCommandStatus(result, Status(ErrorCodes::FailedToParse, errMsg));
+ }
- Status WriteCmd::checkAuthForCommand( ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj ) {
+    // Note that this is a runCommand, and therefore the database and the collection name
+    // are in different parts of the grammar for the command. But it's more convenient to
+    // work with a NamespaceString. We build it here and replace it in the parsed command.
+    // Internally, everything works with the namespace string as opposed to just the
+    // collection name.
+ NamespaceString nss(dbName, request.getNS());
+ request.setNSS(nss);
- Status status( auth::checkAuthForWriteCommand( AuthorizationSession::get(client),
- _writeType,
- NamespaceString( parseNs( dbname, cmdObj ) ),
- cmdObj ));
+ StatusWith<WriteConcernOptions> wcStatus = extractWriteConcern(cmdObj);
- // TODO: Remove this when we standardize GLE reporting from commands
- if ( !status.isOK() ) {
- LastError::get(client).setLastError(status.code(), status.reason());
- }
+ if (!wcStatus.isOK()) {
+ return appendCommandStatus(result, wcStatus.getStatus());
+ }
+ txn->setWriteConcern(wcStatus.getValue());
+
+ WriteBatchExecutor writeBatchExecutor(
+ txn, &globalOpCounters, &LastError::get(txn->getClient()));
+
+ writeBatchExecutor.executeBatch(request, &response);
+
+ result.appendElements(response.toBSON());
+ return response.getOk();
+}
+
+Status WriteCmd::explain(OperationContext* txn,
+ const std::string& dbname,
+ const BSONObj& cmdObj,
+ ExplainCommon::Verbosity verbosity,
+ BSONObjBuilder* out) const {
+ // For now we only explain update and delete write commands.
+ if (BatchedCommandRequest::BatchType_Update != _writeType &&
+ BatchedCommandRequest::BatchType_Delete != _writeType) {
+ return Status(ErrorCodes::IllegalOperation,
+ "Only update and delete write ops can be explained");
+ }
- return status;
+ // Parse the batch request.
+ BatchedCommandRequest request(_writeType);
+ std::string errMsg;
+ if (!request.parseBSON(cmdObj, &errMsg) || !request.isValid(&errMsg)) {
+ return Status(ErrorCodes::FailedToParse, errMsg);
}
- // Write commands are counted towards their corresponding opcounters, not command opcounters.
- bool WriteCmd::shouldAffectCommandCounter() const { return false; }
-
- bool WriteCmd::run(OperationContext* txn,
- const string& dbName,
- BSONObj& cmdObj,
- int options,
- string& errMsg,
- BSONObjBuilder& result) {
- // Can't be run on secondaries.
- dassert(txn->writesAreReplicated());
- BatchedCommandRequest request( _writeType );
- BatchedCommandResponse response;
-
- if ( !request.parseBSON( cmdObj, &errMsg ) || !request.isValid( &errMsg ) ) {
- return appendCommandStatus( result, Status( ErrorCodes::FailedToParse, errMsg ) );
- }
+    // Note that this is a runCommand, and therefore the database and the collection name
+    // are in different parts of the grammar for the command. But it's more convenient to
+    // work with a NamespaceString. We build it here and replace it in the parsed command.
+    // Internally, everything works with the namespace string as opposed to just the
+    // collection name.
+ NamespaceString nsString(dbname, request.getNS());
+ request.setNSS(nsString);
+
+ // Do the validation of the batch that is shared with non-explained write batches.
+ Status isValid = WriteBatchExecutor::validateBatch(request);
+ if (!isValid.isOK()) {
+ return isValid;
+ }
- // Note that this is a runCommmand, and therefore, the database and the collection name
- // are in different parts of the grammar for the command. But it's more convenient to
- // work with a NamespaceString. We built it here and replace it in the parsed command.
- // Internally, everything work with the namespace string as opposed to just the
- // collection name.
- NamespaceString nss(dbName, request.getNS());
- request.setNSS(nss);
+ // Explain must do one additional piece of validation: For now we only explain
+ // singleton batches.
+ if (request.sizeWriteOps() != 1u) {
+ return Status(ErrorCodes::InvalidLength, "explained write batches must be of size 1");
+ }
- StatusWith<WriteConcernOptions> wcStatus = extractWriteConcern(cmdObj);
+ ScopedTransaction scopedXact(txn, MODE_IX);
- if (!wcStatus.isOK()) {
- return appendCommandStatus(result, wcStatus.getStatus());
- }
- txn->setWriteConcern(wcStatus.getValue());
+ // Get a reference to the singleton batch item (it's the 0th item in the batch).
+ BatchItemRef batchItem(&request, 0);
- WriteBatchExecutor writeBatchExecutor(txn,
- &globalOpCounters,
- &LastError::get(txn->getClient()));
+ if (BatchedCommandRequest::BatchType_Update == _writeType) {
+ // Create the update request.
+ UpdateRequest updateRequest(nsString);
+ updateRequest.setQuery(batchItem.getUpdate()->getQuery());
+ updateRequest.setUpdates(batchItem.getUpdate()->getUpdateExpr());
+ updateRequest.setMulti(batchItem.getUpdate()->getMulti());
+ updateRequest.setUpsert(batchItem.getUpdate()->getUpsert());
+ UpdateLifecycleImpl updateLifecycle(true, updateRequest.getNamespaceString());
+ updateRequest.setLifecycle(&updateLifecycle);
+ updateRequest.setExplain();
- writeBatchExecutor.executeBatch( request, &response );
+ // Explained updates can yield.
+ updateRequest.setYieldPolicy(PlanExecutor::YIELD_AUTO);
- result.appendElements( response.toBSON() );
- return response.getOk();
- }
+ OpDebug* debug = &CurOp::get(txn)->debug();
- Status WriteCmd::explain(OperationContext* txn,
- const std::string& dbname,
- const BSONObj& cmdObj,
- ExplainCommon::Verbosity verbosity,
- BSONObjBuilder* out) const {
- // For now we only explain update and delete write commands.
- if ( BatchedCommandRequest::BatchType_Update != _writeType &&
- BatchedCommandRequest::BatchType_Delete != _writeType ) {
- return Status( ErrorCodes::IllegalOperation,
- "Only update and delete write ops can be explained" );
+ ParsedUpdate parsedUpdate(txn, &updateRequest);
+ Status parseStatus = parsedUpdate.parseRequest();
+ if (!parseStatus.isOK()) {
+ return parseStatus;
}
- // Parse the batch request.
- BatchedCommandRequest request( _writeType );
- std::string errMsg;
- if ( !request.parseBSON( cmdObj, &errMsg ) || !request.isValid( &errMsg ) ) {
- return Status( ErrorCodes::FailedToParse, errMsg );
- }
+ // Explains of write commands are read-only, but we take write locks so
+ // that timing info is more accurate.
+ AutoGetDb autoDb(txn, nsString.db(), MODE_IX);
+ Lock::CollectionLock colLock(txn->lockState(), nsString.ns(), MODE_IX);
- // Note that this is a runCommmand, and therefore, the database and the collection name
- // are in different parts of the grammar for the command. But it's more convenient to
- // work with a NamespaceString. We built it here and replace it in the parsed command.
- // Internally, everything work with the namespace string as opposed to just the
- // collection name.
- NamespaceString nsString(dbname, request.getNS());
- request.setNSS(nsString);
-
- // Do the validation of the batch that is shared with non-explained write batches.
- Status isValid = WriteBatchExecutor::validateBatch( request );
- if (!isValid.isOK()) {
- return isValid;
- }
+ ensureShardVersionOKOrThrow(txn->getClient(), nsString.ns());
- // Explain must do one additional piece of validation: For now we only explain
- // singleton batches.
- if ( request.sizeWriteOps() != 1u ) {
- return Status( ErrorCodes::InvalidLength,
- "explained write batches must be of size 1" );
+ // Get a pointer to the (possibly NULL) collection.
+ Collection* collection = NULL;
+ if (autoDb.getDb()) {
+ collection = autoDb.getDb()->getCollection(nsString.ns());
}
- ScopedTransaction scopedXact(txn, MODE_IX);
-
- // Get a reference to the singleton batch item (it's the 0th item in the batch).
- BatchItemRef batchItem( &request, 0 );
-
- if ( BatchedCommandRequest::BatchType_Update == _writeType ) {
- // Create the update request.
- UpdateRequest updateRequest( nsString );
- updateRequest.setQuery( batchItem.getUpdate()->getQuery() );
- updateRequest.setUpdates( batchItem.getUpdate()->getUpdateExpr() );
- updateRequest.setMulti( batchItem.getUpdate()->getMulti() );
- updateRequest.setUpsert( batchItem.getUpdate()->getUpsert() );
- UpdateLifecycleImpl updateLifecycle( true, updateRequest.getNamespaceString() );
- updateRequest.setLifecycle( &updateLifecycle );
- updateRequest.setExplain();
-
- // Explained updates can yield.
- updateRequest.setYieldPolicy(PlanExecutor::YIELD_AUTO);
-
- OpDebug* debug = &CurOp::get(txn)->debug();
-
- ParsedUpdate parsedUpdate( txn, &updateRequest );
- Status parseStatus = parsedUpdate.parseRequest();
- if ( !parseStatus.isOK() ) {
- return parseStatus;
- }
-
- // Explains of write commands are read-only, but we take write locks so
- // that timing info is more accurate.
- AutoGetDb autoDb( txn, nsString.db(), MODE_IX );
- Lock::CollectionLock colLock( txn->lockState(), nsString.ns(), MODE_IX );
-
- ensureShardVersionOKOrThrow( txn->getClient(), nsString.ns() );
-
- // Get a pointer to the (possibly NULL) collection.
- Collection* collection = NULL;
- if ( autoDb.getDb() ) {
- collection = autoDb.getDb()->getCollection( nsString.ns() );
- }
-
- PlanExecutor* rawExec;
- uassertStatusOK(getExecutorUpdate(txn, collection, &parsedUpdate, debug, &rawExec));
- std::unique_ptr<PlanExecutor> exec(rawExec);
-
- // Explain the plan tree.
- Explain::explainStages( exec.get(), verbosity, out );
- return Status::OK();
+ PlanExecutor* rawExec;
+ uassertStatusOK(getExecutorUpdate(txn, collection, &parsedUpdate, debug, &rawExec));
+ std::unique_ptr<PlanExecutor> exec(rawExec);
+
+ // Explain the plan tree.
+ Explain::explainStages(exec.get(), verbosity, out);
+ return Status::OK();
+ } else {
+ invariant(BatchedCommandRequest::BatchType_Delete == _writeType);
+
+ // Create the delete request.
+ DeleteRequest deleteRequest(nsString);
+ deleteRequest.setQuery(batchItem.getDelete()->getQuery());
+ deleteRequest.setMulti(batchItem.getDelete()->getLimit() != 1);
+ deleteRequest.setGod(false);
+ deleteRequest.setExplain();
+
+ // Explained deletes can yield.
+ deleteRequest.setYieldPolicy(PlanExecutor::YIELD_AUTO);
+
+ ParsedDelete parsedDelete(txn, &deleteRequest);
+ Status parseStatus = parsedDelete.parseRequest();
+ if (!parseStatus.isOK()) {
+ return parseStatus;
}
- else {
- invariant( BatchedCommandRequest::BatchType_Delete == _writeType );
-
- // Create the delete request.
- DeleteRequest deleteRequest( nsString );
- deleteRequest.setQuery( batchItem.getDelete()->getQuery() );
- deleteRequest.setMulti( batchItem.getDelete()->getLimit() != 1 );
- deleteRequest.setGod( false );
- deleteRequest.setExplain();
-
- // Explained deletes can yield.
- deleteRequest.setYieldPolicy(PlanExecutor::YIELD_AUTO);
-
- ParsedDelete parsedDelete(txn, &deleteRequest);
- Status parseStatus = parsedDelete.parseRequest();
- if (!parseStatus.isOK()) {
- return parseStatus;
- }
-
- // Explains of write commands are read-only, but we take write locks so that timing
- // info is more accurate.
- AutoGetDb autoDb(txn, nsString.db(), MODE_IX);
- Lock::CollectionLock colLock(txn->lockState(), nsString.ns(), MODE_IX);
-
- ensureShardVersionOKOrThrow( txn->getClient(), nsString.ns() );
-
- // Get a pointer to the (possibly NULL) collection.
- Collection* collection = NULL;
- if (autoDb.getDb()) {
- collection = autoDb.getDb()->getCollection(nsString.ns());
- }
-
- PlanExecutor* rawExec;
- uassertStatusOK(getExecutorDelete(txn, collection, &parsedDelete, &rawExec));
- std::unique_ptr<PlanExecutor> exec(rawExec);
-
- // Explain the plan tree.
- Explain::explainStages(exec.get(), verbosity, out);
- return Status::OK();
+
+ // Explains of write commands are read-only, but we take write locks so that timing
+ // info is more accurate.
+ AutoGetDb autoDb(txn, nsString.db(), MODE_IX);
+ Lock::CollectionLock colLock(txn->lockState(), nsString.ns(), MODE_IX);
+
+ ensureShardVersionOKOrThrow(txn->getClient(), nsString.ns());
+
+ // Get a pointer to the (possibly NULL) collection.
+ Collection* collection = NULL;
+ if (autoDb.getDb()) {
+ collection = autoDb.getDb()->getCollection(nsString.ns());
}
- }
- CmdInsert::CmdInsert() :
- WriteCmd( "insert", BatchedCommandRequest::BatchType_Insert ) {
- }
+ PlanExecutor* rawExec;
+ uassertStatusOK(getExecutorDelete(txn, collection, &parsedDelete, &rawExec));
+ std::unique_ptr<PlanExecutor> exec(rawExec);
- void CmdInsert::redactForLogging( mutablebson::Document* cmdObj ) {
- redactTooLongLog( cmdObj, StringData( "documents", StringData::LiteralTag() ) );
+ // Explain the plan tree.
+ Explain::explainStages(exec.get(), verbosity, out);
+ return Status::OK();
}
+}
- void CmdInsert::help( stringstream& help ) const {
- help << "insert documents";
- }
+CmdInsert::CmdInsert() : WriteCmd("insert", BatchedCommandRequest::BatchType_Insert) {}
- CmdUpdate::CmdUpdate() :
- WriteCmd( "update", BatchedCommandRequest::BatchType_Update ) {
- }
+void CmdInsert::redactForLogging(mutablebson::Document* cmdObj) {
+ redactTooLongLog(cmdObj, StringData("documents", StringData::LiteralTag()));
+}
- void CmdUpdate::redactForLogging( mutablebson::Document* cmdObj ) {
- redactTooLongLog( cmdObj, StringData( "updates", StringData::LiteralTag() ) );
- }
+void CmdInsert::help(stringstream& help) const {
+ help << "insert documents";
+}
- void CmdUpdate::help( stringstream& help ) const {
- help << "update documents";
- }
+CmdUpdate::CmdUpdate() : WriteCmd("update", BatchedCommandRequest::BatchType_Update) {}
- CmdDelete::CmdDelete() :
- WriteCmd( "delete", BatchedCommandRequest::BatchType_Delete ) {
- }
+void CmdUpdate::redactForLogging(mutablebson::Document* cmdObj) {
+ redactTooLongLog(cmdObj, StringData("updates", StringData::LiteralTag()));
+}
- void CmdDelete::redactForLogging( mutablebson::Document* cmdObj ) {
- redactTooLongLog( cmdObj, StringData( "deletes", StringData::LiteralTag() ) );
- }
+void CmdUpdate::help(stringstream& help) const {
+ help << "update documents";
+}
- void CmdDelete::help( stringstream& help ) const {
- help << "delete documents";
- }
+CmdDelete::CmdDelete() : WriteCmd("delete", BatchedCommandRequest::BatchType_Delete) {}
+
+void CmdDelete::redactForLogging(mutablebson::Document* cmdObj) {
+ redactTooLongLog(cmdObj, StringData("deletes", StringData::LiteralTag()));
+}
+
+void CmdDelete::help(stringstream& help) const {
+ help << "delete documents";
+}
-} // namespace mongo
+} // namespace mongo
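
For reference, WriteCmd::run() above parses the standard insert/update/delete
write command shapes via BatchedCommandRequest. The sketch below simply prints
one representative document per batch type (optional fields such as
writeConcern are omitted); note that WriteCmd::explain() additionally rejects
any batch whose size is not exactly 1.

#include <iostream>

int main() {
    std::cout
        << R"({ "insert": "coll", "documents": [ { "_id": 1 } ], "ordered": true })" "\n"
        << R"({ "update": "coll", "updates": [ { "q": { "_id": 1 }, "u": { "$set": { "x": 2 } }, "multi": false, "upsert": false } ] })" "\n"
        << R"({ "delete": "coll", "deletes": [ { "q": { "_id": 1 }, "limit": 1 } ] })" "\n";
    return 0;
}
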
diff --git a/src/mongo/db/commands/write_commands/write_commands.h b/src/mongo/db/commands/write_commands/write_commands.h
index cbb2db6cac6..fcdda1b56fd 100644
--- a/src/mongo/db/commands/write_commands/write_commands.h
+++ b/src/mongo/db/commands/write_commands/write_commands.h
@@ -36,89 +36,91 @@
namespace mongo {
+/**
+ * Base class for write commands. Write commands support batch writes and write concern,
+ * and return per-item error information. All write commands use the (non-virtual) entry
+ * point WriteCmd::run().
+ *
+ * Command parsing is performed by the WriteBatch class (command syntax documented there),
+ * and command execution is performed by the WriteBatchExecutor class.
+ */
+class WriteCmd : public Command {
+ MONGO_DISALLOW_COPYING(WriteCmd);
+
+public:
+ virtual ~WriteCmd() {}
+
+protected:
/**
- * Base class for write commands. Write commands support batch writes and write concern,
- * and return per-item error information. All write commands use the (non-virtual) entry
- * point WriteCmd::run().
- *
- * Command parsing is performed by the WriteBatch class (command syntax documented there),
- * and command execution is performed by the WriteBatchExecutor class.
+ * Instantiates a command that can be invoked by "name", which will be capable of issuing
+ * write batches of type "writeType", and will require privilege "action" to run.
*/
- class WriteCmd : public Command {
- MONGO_DISALLOW_COPYING(WriteCmd);
- public:
- virtual ~WriteCmd() {}
-
- protected:
-
- /**
- * Instantiates a command that can be invoked by "name", which will be capable of issuing
- * write batches of type "writeType", and will require privilege "action" to run.
- */
- WriteCmd( StringData name, BatchedCommandRequest::BatchType writeType );
-
- // Full log of write command can be quite large.
- static void redactTooLongLog( mutablebson::Document* cmdObj, StringData fieldName );
-
- private:
- virtual bool slaveOk() const;
-
- virtual bool isWriteCommandForConfigServer() const;
-
- virtual Status checkAuthForCommand( ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj );
-
- virtual bool shouldAffectCommandCounter() const;
-
- // Write command entry point.
- virtual bool run(
- OperationContext* txn,
- const std::string& dbname,
- BSONObj& cmdObj,
- int options,
- std::string& errmsg,
- BSONObjBuilder& result);
-
- // Write commands can be explained.
- virtual Status explain(OperationContext* txn,
- const std::string& dbname,
- const BSONObj& cmdObj,
- ExplainCommon::Verbosity verbosity,
- BSONObjBuilder* out) const;
-
- // Type of batch (e.g. insert).
- BatchedCommandRequest::BatchType _writeType;
- };
-
- class CmdInsert : public WriteCmd {
- MONGO_DISALLOW_COPYING(CmdInsert);
- public:
- CmdInsert();
- void redactForLogging(mutablebson::Document* cmdObj);
-
- private:
- virtual void help(std::stringstream& help) const;
- };
-
- class CmdUpdate : public WriteCmd {
- MONGO_DISALLOW_COPYING(CmdUpdate);
- public:
- CmdUpdate();
- void redactForLogging(mutablebson::Document* cmdObj);
-
- private:
- virtual void help(std::stringstream& help) const;
- };
-
- class CmdDelete : public WriteCmd {
- MONGO_DISALLOW_COPYING(CmdDelete);
- public:
- CmdDelete();
- void redactForLogging(mutablebson::Document* cmdObj);
-
- private:
- virtual void help(std::stringstream& help) const;
- };
-
-} // namespace mongo
+ WriteCmd(StringData name, BatchedCommandRequest::BatchType writeType);
+
+ // Full log of write command can be quite large.
+ static void redactTooLongLog(mutablebson::Document* cmdObj, StringData fieldName);
+
+private:
+ virtual bool slaveOk() const;
+
+ virtual bool isWriteCommandForConfigServer() const;
+
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj);
+
+ virtual bool shouldAffectCommandCounter() const;
+
+ // Write command entry point.
+ virtual bool run(OperationContext* txn,
+ const std::string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result);
+
+ // Write commands can be explained.
+ virtual Status explain(OperationContext* txn,
+ const std::string& dbname,
+ const BSONObj& cmdObj,
+ ExplainCommon::Verbosity verbosity,
+ BSONObjBuilder* out) const;
+
+ // Type of batch (e.g. insert).
+ BatchedCommandRequest::BatchType _writeType;
+};
+
+class CmdInsert : public WriteCmd {
+ MONGO_DISALLOW_COPYING(CmdInsert);
+
+public:
+ CmdInsert();
+ void redactForLogging(mutablebson::Document* cmdObj);
+
+private:
+ virtual void help(std::stringstream& help) const;
+};
+
+class CmdUpdate : public WriteCmd {
+ MONGO_DISALLOW_COPYING(CmdUpdate);
+
+public:
+ CmdUpdate();
+ void redactForLogging(mutablebson::Document* cmdObj);
+
+private:
+ virtual void help(std::stringstream& help) const;
+};
+
+class CmdDelete : public WriteCmd {
+ MONGO_DISALLOW_COPYING(CmdDelete);
+
+public:
+ CmdDelete();
+ void redactForLogging(mutablebson::Document* cmdObj);
+
+private:
+ virtual void help(std::stringstream& help) const;
+};
+
+} // namespace mongo
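
Every class in this header opens with MONGO_DISALLOW_COPYING. In C++11 that
amounts to deleting the copy operations; the macro below is a standalone sketch
mirroring the idea, not mongo's actual definition.

#define DISALLOW_COPYING_SKETCH(Type) \
    Type(const Type&) = delete;       \
    Type& operator=(const Type&) = delete

class NonCopyable {
    DISALLOW_COPYING_SKETCH(NonCopyable);

public:
    NonCopyable() = default;
};

int main() {
    NonCopyable a;
    (void)a;
    // NonCopyable b = a;  // would not compile: the copy constructor is deleted
    return 0;
}
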
diff --git a/src/mongo/db/commands/write_commands/write_commands_common.cpp b/src/mongo/db/commands/write_commands/write_commands_common.cpp
index 69ca1014140..82f3ab4db67 100644
--- a/src/mongo/db/commands/write_commands/write_commands_common.cpp
+++ b/src/mongo/db/commands/write_commands/write_commands_common.cpp
@@ -42,62 +42,55 @@
namespace mongo {
namespace auth {
- using std::string;
- using std::vector;
-
- Status checkAuthForWriteCommand( AuthorizationSession* authzSession,
- BatchedCommandRequest::BatchType cmdType,
- const NamespaceString& cmdNSS,
- const BSONObj& cmdObj ) {
-
- vector<Privilege> privileges;
- ActionSet actionsOnCommandNSS;
-
- if (shouldBypassDocumentValidationForCommand(cmdObj)) {
- actionsOnCommandNSS.addAction(ActionType::bypassDocumentValidation);
- }
-
- if ( cmdType == BatchedCommandRequest::BatchType_Insert ) {
+using std::string;
+using std::vector;
+
+Status checkAuthForWriteCommand(AuthorizationSession* authzSession,
+ BatchedCommandRequest::BatchType cmdType,
+ const NamespaceString& cmdNSS,
+ const BSONObj& cmdObj) {
+ vector<Privilege> privileges;
+ ActionSet actionsOnCommandNSS;
+
+ if (shouldBypassDocumentValidationForCommand(cmdObj)) {
+ actionsOnCommandNSS.addAction(ActionType::bypassDocumentValidation);
+ }
- if ( !cmdNSS.isSystemDotIndexes() ) {
- actionsOnCommandNSS.addAction(ActionType::insert);
+ if (cmdType == BatchedCommandRequest::BatchType_Insert) {
+ if (!cmdNSS.isSystemDotIndexes()) {
+ actionsOnCommandNSS.addAction(ActionType::insert);
+ } else {
+ // Special-case indexes until we have a command
+ string nsToIndex, errMsg;
+ if (!BatchedCommandRequest::getIndexedNS(cmdObj, &nsToIndex, &errMsg)) {
+ return Status(ErrorCodes::FailedToParse, errMsg);
}
- else {
- // Special-case indexes until we have a command
- string nsToIndex, errMsg;
- if ( !BatchedCommandRequest::getIndexedNS( cmdObj, &nsToIndex, &errMsg ) ) {
- return Status( ErrorCodes::FailedToParse, errMsg );
- }
- NamespaceString nssToIndex( nsToIndex );
- privileges.push_back( Privilege( ResourcePattern::forExactNamespace( nssToIndex ),
- ActionType::createIndex ) );
- }
+ NamespaceString nssToIndex(nsToIndex);
+ privileges.push_back(
+ Privilege(ResourcePattern::forExactNamespace(nssToIndex), ActionType::createIndex));
}
- else if ( cmdType == BatchedCommandRequest::BatchType_Update ) {
- actionsOnCommandNSS.addAction(ActionType::update);
+ } else if (cmdType == BatchedCommandRequest::BatchType_Update) {
+ actionsOnCommandNSS.addAction(ActionType::update);
- // Upsert also requires insert privs
- if ( BatchedCommandRequest::containsUpserts( cmdObj ) ) {
- actionsOnCommandNSS.addAction(ActionType::insert);
- }
- }
- else {
- fassert( 17251, cmdType == BatchedCommandRequest::BatchType_Delete );
- actionsOnCommandNSS.addAction(ActionType::remove);
- }
-
-
- if (!actionsOnCommandNSS.empty()) {
- privileges.emplace_back(ResourcePattern::forExactNamespace(cmdNSS),
- actionsOnCommandNSS);
+ // Upsert also requires insert privs
+ if (BatchedCommandRequest::containsUpserts(cmdObj)) {
+ actionsOnCommandNSS.addAction(ActionType::insert);
}
+ } else {
+ fassert(17251, cmdType == BatchedCommandRequest::BatchType_Delete);
+ actionsOnCommandNSS.addAction(ActionType::remove);
+ }
- if ( authzSession->isAuthorizedForPrivileges( privileges ) )
- return Status::OK();
- return Status( ErrorCodes::Unauthorized, "unauthorized" );
+ if (!actionsOnCommandNSS.empty()) {
+ privileges.emplace_back(ResourcePattern::forExactNamespace(cmdNSS), actionsOnCommandNSS);
}
+ if (authzSession->isAuthorizedForPrivileges(privileges))
+ return Status::OK();
+
+ return Status(ErrorCodes::Unauthorized, "unauthorized");
+}
}
}
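
The action mapping in checkAuthForWriteCommand() above is: inserts need the
insert action (createIndex instead for system.indexes), updates need update
plus insert when the batch contains upserts, and deletes need remove. A
standalone sketch of that mapping, using plain strings as stand-ins for mongo's
ActionType values:

#include <iostream>
#include <string>
#include <vector>

enum class Batch { Insert, Update, Delete };

std::vector<std::string> requiredActions(Batch type, bool containsUpserts) {
    std::vector<std::string> actions;
    if (type == Batch::Insert) {
        actions.push_back("insert");  // system.indexes would get createIndex instead
    } else if (type == Batch::Update) {
        actions.push_back("update");
        if (containsUpserts)
            actions.push_back("insert");  // upsert also requires insert privs
    } else {
        actions.push_back("remove");
    }
    return actions;
}

int main() {
    for (const auto& a : requiredActions(Batch::Update, /*containsUpserts=*/true))
        std::cout << a << "\n";  // prints "update" then "insert"
    return 0;
}
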
diff --git a/src/mongo/db/commands/write_commands/write_commands_common.h b/src/mongo/db/commands/write_commands/write_commands_common.h
index a1fe6bc9772..cf47bdc02b1 100644
--- a/src/mongo/db/commands/write_commands/write_commands_common.h
+++ b/src/mongo/db/commands/write_commands/write_commands_common.h
@@ -40,10 +40,9 @@
namespace mongo {
namespace auth {
- Status checkAuthForWriteCommand( AuthorizationSession* authzSession,
- BatchedCommandRequest::BatchType cmdType,
- const NamespaceString& cmdNSS,
- const BSONObj& cmdObj );
-
+Status checkAuthForWriteCommand(AuthorizationSession* authzSession,
+ BatchedCommandRequest::BatchType cmdType,
+ const NamespaceString& cmdNSS,
+ const BSONObj& cmdObj);
}
}
diff --git a/src/mongo/db/commands/writeback_compatibility_shim.cpp b/src/mongo/db/commands/writeback_compatibility_shim.cpp
index 99feccfad58..b03cf3b21dc 100644
--- a/src/mongo/db/commands/writeback_compatibility_shim.cpp
+++ b/src/mongo/db/commands/writeback_compatibility_shim.cpp
@@ -43,85 +43,85 @@
namespace mongo {
- using std::string;
- using std::stringstream;
-
- using mongoutils::str::stream;
-
- /**
- * This command is required in v3.0 mongod to prevent v2.6 mongos from entering a tight loop and
- * spamming the server with invalid writebacklisten requests. This command reports an error
- * and pauses, which is safe because the original v2.6 WBL command was a long-poll (30s).
- */
- class WriteBackCommand : public Command {
- public:
- WriteBackCommand() : Command("writebacklisten") {}
-
- void help(stringstream& helpOut) const {
- helpOut << "v3.0 disallowed internal command, present for compatibility only";
- }
-
- virtual bool isWriteCommandForConfigServer() const { return false; }
-
- //
- // Same as v2.6 settings
- //
-
- virtual bool adminOnly() const { return true; }
- virtual bool slaveOk() const { return true; }
-
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::internal);
- out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
- }
-
- virtual bool run(OperationContext* opCtx,
- const string&,
- BSONObj&,
- int,
- string&,
- BSONObjBuilder& result) {
-
- string errMsg = stream()
- << "Writeback functionality is no longer present in v3.0 mongod, "
- << "a v2.6 mongos may be running in the v3.0 cluster at "
- << opCtx->getClient()->clientAddress(false);
-
- error() << errMsg;
-
- // Prevent v2.6 mongos from spamming writebacklisten retries
- const int kSleepSecsBeforeMessage = 5;
- sleepsecs(kSleepSecsBeforeMessage);
-
- return appendCommandStatus(result, Status(ErrorCodes::CommandNotFound, errMsg));
- }
- };
-
- /**
- * The "writeBacksQueued" field is required in ServerStatus output to avoid v2.6 mongos crashing
- * confusingly when upgrading a cluster.
- */
- class WriteBacksQueuedSSM : public ServerStatusMetric {
- public:
- WriteBacksQueuedSSM() : ServerStatusMetric(".writeBacksQueued") {}
-
- virtual void appendAtLeaf(BSONObjBuilder& b) const {
- // always append false, we don't queue writebacks
- b.appendBool(_leafName, false);
- }
- };
-
- namespace {
- MONGO_INITIALIZER(RegisterWriteBackShim)(InitializerContext* context) {
- // Leaked intentionally: a Command registers itself when constructed.
- new WriteBackCommand();
- // Leaked intentionally: a SSM registers itself when constructed.
- new WriteBacksQueuedSSM();
- return Status::OK();
- }
+using std::string;
+using std::stringstream;
+
+using mongoutils::str::stream;
+
+/**
+ * This command is required in v3.0 mongod to prevent v2.6 mongos from entering a tight loop and
+ * spamming the server with invalid writebacklisten requests. This command reports an error
+ * and pauses, which is safe because the original v2.6 WBL command was a long-poll (30s).
+ */
+class WriteBackCommand : public Command {
+public:
+ WriteBackCommand() : Command("writebacklisten") {}
+
+ void help(stringstream& helpOut) const {
+ helpOut << "v3.0 disallowed internal command, present for compatibility only";
+ }
+
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+
+ //
+ // Same as v2.6 settings
+ //
+
+ virtual bool adminOnly() const {
+ return true;
+ }
+ virtual bool slaveOk() const {
+ return true;
}
-} // namespace
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::internal);
+ out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
+ }
+
+ virtual bool run(
+ OperationContext* opCtx, const string&, BSONObj&, int, string&, BSONObjBuilder& result) {
+ string errMsg = stream() << "Writeback functionality is no longer present in v3.0 mongod, "
+ << "a v2.6 mongos may be running in the v3.0 cluster at "
+ << opCtx->getClient()->clientAddress(false);
+
+ error() << errMsg;
+
+ // Prevent v2.6 mongos from spamming writebacklisten retries
+ const int kSleepSecsBeforeMessage = 5;
+ sleepsecs(kSleepSecsBeforeMessage);
+
+ return appendCommandStatus(result, Status(ErrorCodes::CommandNotFound, errMsg));
+ }
+};
+
+/**
+ * The "writeBacksQueued" field is required in ServerStatus output to avoid v2.6 mongos crashing
+ * confusingly when upgrading a cluster.
+ */
+class WriteBacksQueuedSSM : public ServerStatusMetric {
+public:
+ WriteBacksQueuedSSM() : ServerStatusMetric(".writeBacksQueued") {}
+
+ virtual void appendAtLeaf(BSONObjBuilder& b) const {
+ // always append false, we don't queue writebacks
+ b.appendBool(_leafName, false);
+ }
+};
+
+namespace {
+MONGO_INITIALIZER(RegisterWriteBackShim)(InitializerContext* context) {
+ // Leaked intentionally: a Command registers itself when constructed.
+ new WriteBackCommand();
+ // Leaked intentionally: a SSM registers itself when constructed.
+ new WriteBacksQueuedSSM();
+ return Status::OK();
+}
+}
+
+} // namespace
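
WriteBackCommand::run() above boils down to: log an error, pause so a legacy
v2.6 mongos cannot hot-loop on the failure, then report the command as
unavailable. A toy standalone mirror of that behavior (names and the boolean
return convention are illustrative only):

#include <chrono>
#include <iostream>
#include <string>
#include <thread>

bool runWritebackShimSketch(std::string* errMsg) {
    *errMsg = "writeback functionality is no longer present";
    std::cerr << *errMsg << "\n";
    const int kSleepSecsBeforeMessage = 5;  // same pause as the real shim
    std::this_thread::sleep_for(std::chrono::seconds(kSleepSecsBeforeMessage));
    return false;  // the real command appends Status(CommandNotFound, errMsg)
}

int main() {
    std::string errMsg;
    return runWritebackShimSketch(&errMsg) ? 0 : 1;
}
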