summaryrefslogtreecommitdiff
path: root/src/mongo/s/write_ops
diff options
context:
space:
mode:
authorMark Benvenuto <mark.benvenuto@mongodb.com>2015-06-20 00:22:50 -0400
committerMark Benvenuto <mark.benvenuto@mongodb.com>2015-06-20 10:56:02 -0400
commit9c2ed42daa8fbbef4a919c21ec564e2db55e8d60 (patch)
tree3814f79c10d7b490948d8cb7b112ac1dd41ceff1 /src/mongo/s/write_ops
parent01965cf52bce6976637ecb8f4a622aeb05ab256a (diff)
downloadmongo-9c2ed42daa8fbbef4a919c21ec564e2db55e8d60.tar.gz
SERVER-18579: Clang-Format - reformat code, no comment reflow
Diffstat (limited to 'src/mongo/s/write_ops')
-rw-r--r--src/mongo/s/write_ops/batch_downconvert.cpp448
-rw-r--r--src/mongo/s/write_ops/batch_downconvert.h78
-rw-r--r--src/mongo/s/write_ops/batch_downconvert_test.cpp359
-rw-r--r--src/mongo/s/write_ops/batch_upconvert.cpp329
-rw-r--r--src/mongo/s/write_ops/batch_upconvert.h39
-rw-r--r--src/mongo/s/write_ops/batch_upconvert_test.cpp213
-rw-r--r--src/mongo/s/write_ops/batch_write_exec.cpp547
-rw-r--r--src/mongo/s/write_ops/batch_write_exec.h136
-rw-r--r--src/mongo/s/write_ops/batch_write_exec_test.cpp431
-rw-r--r--src/mongo/s/write_ops/batch_write_op.cpp1338
-rw-r--r--src/mongo/s/write_ops/batch_write_op.h422
-rw-r--r--src/mongo/s/write_ops/batch_write_op_test.cpp3278
-rw-r--r--src/mongo/s/write_ops/batched_command_request.cpp528
-rw-r--r--src/mongo/s/write_ops/batched_command_request.h359
-rw-r--r--src/mongo/s/write_ops/batched_command_response.cpp895
-rw-r--r--src/mongo/s/write_ops/batched_command_response.h316
-rw-r--r--src/mongo/s/write_ops/batched_command_response_test.cpp75
-rw-r--r--src/mongo/s/write_ops/batched_delete_document.cpp193
-rw-r--r--src/mongo/s/write_ops/batched_delete_document.h124
-rw-r--r--src/mongo/s/write_ops/batched_delete_request.cpp468
-rw-r--r--src/mongo/s/write_ops/batched_delete_request.h226
-rw-r--r--src/mongo/s/write_ops/batched_delete_request_test.cpp77
-rw-r--r--src/mongo/s/write_ops/batched_insert_request.cpp505
-rw-r--r--src/mongo/s/write_ops/batched_insert_request.h224
-rw-r--r--src/mongo/s/write_ops/batched_insert_request_test.cpp220
-rw-r--r--src/mongo/s/write_ops/batched_request_metadata.cpp217
-rw-r--r--src/mongo/s/write_ops/batched_request_metadata.h109
-rw-r--r--src/mongo/s/write_ops/batched_request_metadata_test.cpp58
-rw-r--r--src/mongo/s/write_ops/batched_update_document.cpp312
-rw-r--r--src/mongo/s/write_ops/batched_update_document.h164
-rw-r--r--src/mongo/s/write_ops/batched_update_request.cpp522
-rw-r--r--src/mongo/s/write_ops/batched_update_request.h226
-rw-r--r--src/mongo/s/write_ops/batched_update_request_test.cpp85
-rw-r--r--src/mongo/s/write_ops/batched_upsert_detail.cpp184
-rw-r--r--src/mongo/s/write_ops/batched_upsert_detail.h124
-rw-r--r--src/mongo/s/write_ops/wc_error_detail.cpp223
-rw-r--r--src/mongo/s/write_ops/wc_error_detail.h144
-rw-r--r--src/mongo/s/write_ops/write_error_detail.cpp287
-rw-r--r--src/mongo/s/write_ops/write_error_detail.h164
-rw-r--r--src/mongo/s/write_ops/write_op.cpp391
-rw-r--r--src/mongo/s/write_ops/write_op.h332
-rw-r--r--src/mongo/s/write_ops/write_op_test.cpp509
42 files changed, 7800 insertions, 8079 deletions
diff --git a/src/mongo/s/write_ops/batch_downconvert.cpp b/src/mongo/s/write_ops/batch_downconvert.cpp
index 3bcafcf31a1..f786606d9b6 100644
--- a/src/mongo/s/write_ops/batch_downconvert.cpp
+++ b/src/mongo/s/write_ops/batch_downconvert.cpp
@@ -40,274 +40,254 @@
namespace mongo {
- using std::endl;
- using std::string;
- using std::vector;
-
- Status extractGLEErrors( const BSONObj& gleResponse, GLEErrors* errors ) {
-
- // DRAGONS
- // Parsing GLE responses is incredibly finicky.
- // The order of testing here is extremely important.
-
- ///////////////////////////////////////////////////////////////////////
- // IMPORTANT!
- // Also update extractGLEErrors in batch_api.js for any changes made here.
-
- const bool isOK = gleResponse["ok"].trueValue();
- const string err = gleResponse["err"].str();
- const string errMsg = gleResponse["errmsg"].str();
- const string wNote = gleResponse["wnote"].str();
- const string jNote = gleResponse["jnote"].str();
- const int code = gleResponse["code"].numberInt();
- const bool timeout = gleResponse["wtimeout"].trueValue();
-
- if ( err == "norepl" || err == "noreplset" ) {
- // Know this is legacy gle and the repl not enforced - write concern error in 2.4
- errors->wcError.reset( new WCErrorDetail );
- errors->wcError->setErrCode( ErrorCodes::WriteConcernFailed );
- if ( !errMsg.empty() ) {
- errors->wcError->setErrMessage( errMsg );
- }
- else if ( !wNote.empty() ) {
- errors->wcError->setErrMessage( wNote );
- }
- else {
- errors->wcError->setErrMessage( err );
- }
- }
- else if ( timeout ) {
- // Know there was no write error
- errors->wcError.reset( new WCErrorDetail );
- errors->wcError->setErrCode( ErrorCodes::WriteConcernFailed );
- if ( !errMsg.empty() ) {
- errors->wcError->setErrMessage( errMsg );
- }
- else {
- errors->wcError->setErrMessage( err );
- }
- errors->wcError->setErrInfo( BSON( "wtimeout" << true ) );
- }
- else if ( code == 10990 /* no longer primary */
- || code == 16805 /* replicatedToNum no longer primary */
- || code == 14830 /* gle wmode changed / invalid */
- // 2.6 Error codes
- || code == ErrorCodes::NotMaster
- || code == ErrorCodes::UnknownReplWriteConcern
- || code == ErrorCodes::WriteConcernFailed ) {
- // Write concern errors that get returned as regular errors (result may not be ok: 1.0)
- errors->wcError.reset( new WCErrorDetail );
- errors->wcError->setErrCode( code );
- errors->wcError->setErrMessage( errMsg );
+using std::endl;
+using std::string;
+using std::vector;
+
+Status extractGLEErrors(const BSONObj& gleResponse, GLEErrors* errors) {
+ // DRAGONS
+ // Parsing GLE responses is incredibly finicky.
+ // The order of testing here is extremely important.
+
+ ///////////////////////////////////////////////////////////////////////
+ // IMPORTANT!
+ // Also update extractGLEErrors in batch_api.js for any changes made here.
+
+ const bool isOK = gleResponse["ok"].trueValue();
+ const string err = gleResponse["err"].str();
+ const string errMsg = gleResponse["errmsg"].str();
+ const string wNote = gleResponse["wnote"].str();
+ const string jNote = gleResponse["jnote"].str();
+ const int code = gleResponse["code"].numberInt();
+ const bool timeout = gleResponse["wtimeout"].trueValue();
+
+ if (err == "norepl" || err == "noreplset") {
+ // Know this is legacy gle and the repl not enforced - write concern error in 2.4
+ errors->wcError.reset(new WCErrorDetail);
+ errors->wcError->setErrCode(ErrorCodes::WriteConcernFailed);
+ if (!errMsg.empty()) {
+ errors->wcError->setErrMessage(errMsg);
+ } else if (!wNote.empty()) {
+ errors->wcError->setErrMessage(wNote);
+ } else {
+ errors->wcError->setErrMessage(err);
}
- else if ( !isOK ) {
-
- //
- // !!! SOME GLE ERROR OCCURRED, UNKNOWN WRITE RESULT !!!
- //
-
- return Status( DBException::convertExceptionCode(
- code ? code : ErrorCodes::UnknownError ),
- errMsg );
+ } else if (timeout) {
+ // Know there was no write error
+ errors->wcError.reset(new WCErrorDetail);
+ errors->wcError->setErrCode(ErrorCodes::WriteConcernFailed);
+ if (!errMsg.empty()) {
+ errors->wcError->setErrMessage(errMsg);
+ } else {
+ errors->wcError->setErrMessage(err);
}
- else if ( !err.empty() ) {
- // Write error
- errors->writeError.reset( new WriteErrorDetail );
- int writeErrorCode = code == 0 ? ErrorCodes::UnknownError : code;
-
- // COMPATIBILITY
- // Certain clients expect write commands to always report 11000 for duplicate key
- // errors, while legacy GLE can return additional codes.
- if ( writeErrorCode == 11001 /* dup key in update */
- || writeErrorCode == 12582 /* dup key capped */) {
- writeErrorCode = ErrorCodes::DuplicateKey;
- }
-
- errors->writeError->setErrCode( writeErrorCode );
- errors->writeError->setErrMessage( err );
- }
- else if ( !jNote.empty() ) {
- // Know this is legacy gle and the journaling not enforced - write concern error in 2.4
- errors->wcError.reset( new WCErrorDetail );
- errors->wcError->setErrCode( ErrorCodes::WriteConcernFailed );
- errors->wcError->setErrMessage( jNote );
+ errors->wcError->setErrInfo(BSON("wtimeout" << true));
+ } else if (code == 10990 /* no longer primary */
+ ||
+ code == 16805 /* replicatedToNum no longer primary */
+ ||
+ code == 14830 /* gle wmode changed / invalid */
+ // 2.6 Error codes
+ ||
+ code == ErrorCodes::NotMaster || code == ErrorCodes::UnknownReplWriteConcern ||
+ code == ErrorCodes::WriteConcernFailed) {
+ // Write concern errors that get returned as regular errors (result may not be ok: 1.0)
+ errors->wcError.reset(new WCErrorDetail);
+ errors->wcError->setErrCode(code);
+ errors->wcError->setErrMessage(errMsg);
+ } else if (!isOK) {
+ //
+ // !!! SOME GLE ERROR OCCURRED, UNKNOWN WRITE RESULT !!!
+ //
+
+ return Status(DBException::convertExceptionCode(code ? code : ErrorCodes::UnknownError),
+ errMsg);
+ } else if (!err.empty()) {
+ // Write error
+ errors->writeError.reset(new WriteErrorDetail);
+ int writeErrorCode = code == 0 ? ErrorCodes::UnknownError : code;
+
+ // COMPATIBILITY
+ // Certain clients expect write commands to always report 11000 for duplicate key
+ // errors, while legacy GLE can return additional codes.
+ if (writeErrorCode == 11001 /* dup key in update */
+ ||
+ writeErrorCode == 12582 /* dup key capped */) {
+ writeErrorCode = ErrorCodes::DuplicateKey;
}
- return Status::OK();
+ errors->writeError->setErrCode(writeErrorCode);
+ errors->writeError->setErrMessage(err);
+ } else if (!jNote.empty()) {
+ // Know this is legacy gle and the journaling not enforced - write concern error in 2.4
+ errors->wcError.reset(new WCErrorDetail);
+ errors->wcError->setErrCode(ErrorCodes::WriteConcernFailed);
+ errors->wcError->setErrMessage(jNote);
}
- /**
- * Suppress the "err" and "code" field if they are coming from a previous write error and
- * are not related to write concern. Also removes any write stats information (e.g. "n")
- *
- * Also, In some cases, 2.4 GLE w/ wOpTime can give us duplicate "err" and "code" fields b/c of
- * reporting a previous error. The later field is what we want - dedup and use later field.
- *
- * Returns the stripped GLE response.
- */
- BSONObj stripNonWCInfo( const BSONObj& gleResponse ) {
-
- BSONObjIterator it( gleResponse );
- BSONObjBuilder builder;
-
- BSONElement codeField; // eoo
- BSONElement errField; // eoo
-
- while ( it.more() ) {
- BSONElement el = it.next();
- StringData fieldName( el.fieldName() );
- if ( fieldName.compare( "err" ) == 0 ) {
- errField = el;
- }
- else if ( fieldName.compare( "code" ) == 0 ) {
- codeField = el;
- }
- else if ( fieldName.compare( "n" ) == 0 || fieldName.compare( "nModified" ) == 0
- || fieldName.compare( "upserted" ) == 0
- || fieldName.compare( "updatedExisting" ) == 0 ) {
- // Suppress field
- }
- else {
- builder.append( el );
- }
- }
+ return Status::OK();
+}
- if ( !codeField.eoo() ) {
- if ( !gleResponse["ok"].trueValue() ) {
- // The last code will be from the write concern
- builder.append( codeField );
- }
- else {
- // The code is from a non-wc error on this connection - suppress it
- }
+/**
+ * Suppress the "err" and "code" field if they are coming from a previous write error and
+ * are not related to write concern. Also removes any write stats information (e.g. "n")
+ *
+ * Also, In some cases, 2.4 GLE w/ wOpTime can give us duplicate "err" and "code" fields b/c of
+ * reporting a previous error. The later field is what we want - dedup and use later field.
+ *
+ * Returns the stripped GLE response.
+ */
+BSONObj stripNonWCInfo(const BSONObj& gleResponse) {
+ BSONObjIterator it(gleResponse);
+ BSONObjBuilder builder;
+
+ BSONElement codeField; // eoo
+ BSONElement errField; // eoo
+
+ while (it.more()) {
+ BSONElement el = it.next();
+ StringData fieldName(el.fieldName());
+ if (fieldName.compare("err") == 0) {
+ errField = el;
+ } else if (fieldName.compare("code") == 0) {
+ codeField = el;
+ } else if (fieldName.compare("n") == 0 || fieldName.compare("nModified") == 0 ||
+ fieldName.compare("upserted") == 0 ||
+ fieldName.compare("updatedExisting") == 0) {
+ // Suppress field
+ } else {
+ builder.append(el);
}
+ }
- if ( !errField.eoo() ) {
- string err = errField.str();
- if ( err == "norepl" || err == "noreplset" || err == "timeout" ) {
- // Append err if it's from a write concern issue
- builder.append( errField );
- }
- else {
- // Suppress non-write concern err as null, but we need to report null err if ok
- if ( gleResponse["ok"].trueValue() )
- builder.appendNull( errField.fieldName() );
- }
+ if (!codeField.eoo()) {
+ if (!gleResponse["ok"].trueValue()) {
+ // The last code will be from the write concern
+ builder.append(codeField);
+ } else {
+ // The code is from a non-wc error on this connection - suppress it
}
-
- return builder.obj();
}
- // Adds a wOpTime and a wElectionId field to a set of gle options
- static BSONObj buildGLECmdWithOpTime( const BSONObj& gleOptions,
- const Timestamp& opTime,
- const OID& electionId ) {
- BSONObjBuilder builder;
- BSONObjIterator it( gleOptions );
-
- for ( int i = 0; it.more(); ++i ) {
- BSONElement el = it.next();
-
- // Make sure first element is getLastError : 1
- if ( i == 0 ) {
- StringData elName( el.fieldName() );
- if ( !elName.equalCaseInsensitive( "getLastError" ) ) {
- builder.append( "getLastError", 1 );
- }
- }
-
- builder.append( el );
+ if (!errField.eoo()) {
+ string err = errField.str();
+ if (err == "norepl" || err == "noreplset" || err == "timeout") {
+ // Append err if it's from a write concern issue
+ builder.append(errField);
+ } else {
+ // Suppress non-write concern err as null, but we need to report null err if ok
+ if (gleResponse["ok"].trueValue())
+ builder.appendNull(errField.fieldName());
}
- builder.append( "wOpTime", opTime );
- builder.appendOID( "wElectionId", const_cast<OID*>(&electionId) );
- return builder.obj();
}
- Status enforceLegacyWriteConcern( MultiCommandDispatch* dispatcher,
- StringData dbName,
- const BSONObj& options,
- const HostOpTimeMap& hostOpTimes,
- vector<LegacyWCResponse>* legacyWCResponses ) {
+ return builder.obj();
+}
- if ( hostOpTimes.empty() ) {
- return Status::OK();
+// Adds a wOpTime and a wElectionId field to a set of gle options
+static BSONObj buildGLECmdWithOpTime(const BSONObj& gleOptions,
+ const Timestamp& opTime,
+ const OID& electionId) {
+ BSONObjBuilder builder;
+ BSONObjIterator it(gleOptions);
+
+ for (int i = 0; it.more(); ++i) {
+ BSONElement el = it.next();
+
+ // Make sure first element is getLastError : 1
+ if (i == 0) {
+ StringData elName(el.fieldName());
+ if (!elName.equalCaseInsensitive("getLastError")) {
+ builder.append("getLastError", 1);
+ }
}
- for ( HostOpTimeMap::const_iterator it = hostOpTimes.begin(); it != hostOpTimes.end();
- ++it ) {
-
- const ConnectionString& shardEndpoint = it->first;
- const HostOpTime hot = it->second;
- const Timestamp& opTime = hot.opTime;
- const OID& electionId = hot.electionId;
-
- LOG( 3 ) << "enforcing write concern " << options << " on " << shardEndpoint.toString()
- << " at opTime " << opTime.toStringPretty() << " with electionID "
- << electionId;
+ builder.append(el);
+ }
+ builder.append("wOpTime", opTime);
+ builder.appendOID("wElectionId", const_cast<OID*>(&electionId));
+ return builder.obj();
+}
- BSONObj gleCmd = buildGLECmdWithOpTime( options, opTime, electionId );
+Status enforceLegacyWriteConcern(MultiCommandDispatch* dispatcher,
+ StringData dbName,
+ const BSONObj& options,
+ const HostOpTimeMap& hostOpTimes,
+ vector<LegacyWCResponse>* legacyWCResponses) {
+ if (hostOpTimes.empty()) {
+ return Status::OK();
+ }
- RawBSONSerializable gleCmdSerial( gleCmd );
- dispatcher->addCommand( shardEndpoint, dbName, gleCmdSerial );
- }
+ for (HostOpTimeMap::const_iterator it = hostOpTimes.begin(); it != hostOpTimes.end(); ++it) {
+ const ConnectionString& shardEndpoint = it->first;
+ const HostOpTime hot = it->second;
+ const Timestamp& opTime = hot.opTime;
+ const OID& electionId = hot.electionId;
- dispatcher->sendAll();
+ LOG(3) << "enforcing write concern " << options << " on " << shardEndpoint.toString()
+ << " at opTime " << opTime.toStringPretty() << " with electionID " << electionId;
- vector<Status> failedStatuses;
+ BSONObj gleCmd = buildGLECmdWithOpTime(options, opTime, electionId);
- while ( dispatcher->numPending() > 0 ) {
+ RawBSONSerializable gleCmdSerial(gleCmd);
+ dispatcher->addCommand(shardEndpoint, dbName, gleCmdSerial);
+ }
- ConnectionString shardEndpoint;
- RawBSONSerializable gleResponseSerial;
+ dispatcher->sendAll();
- Status dispatchStatus = dispatcher->recvAny( &shardEndpoint, &gleResponseSerial );
- if ( !dispatchStatus.isOK() ) {
- // We need to get all responses before returning
- failedStatuses.push_back( dispatchStatus );
- continue;
- }
+ vector<Status> failedStatuses;
- BSONObj gleResponse = stripNonWCInfo( gleResponseSerial.toBSON() );
+ while (dispatcher->numPending() > 0) {
+ ConnectionString shardEndpoint;
+ RawBSONSerializable gleResponseSerial;
- // Use the downconversion tools to determine if this GLE response is ok, a
- // write concern error, or an unknown error we should immediately abort for.
- GLEErrors errors;
- Status extractStatus = extractGLEErrors( gleResponse, &errors );
- if ( !extractStatus.isOK() ) {
- failedStatuses.push_back( extractStatus );
- continue;
- }
+ Status dispatchStatus = dispatcher->recvAny(&shardEndpoint, &gleResponseSerial);
+ if (!dispatchStatus.isOK()) {
+ // We need to get all responses before returning
+ failedStatuses.push_back(dispatchStatus);
+ continue;
+ }
- LegacyWCResponse wcResponse;
- wcResponse.shardHost = shardEndpoint.toString();
- wcResponse.gleResponse = gleResponse;
- if ( errors.wcError.get() ) {
- wcResponse.errToReport = errors.wcError->getErrMessage();
- }
+ BSONObj gleResponse = stripNonWCInfo(gleResponseSerial.toBSON());
- legacyWCResponses->push_back( wcResponse );
+ // Use the downconversion tools to determine if this GLE response is ok, a
+ // write concern error, or an unknown error we should immediately abort for.
+ GLEErrors errors;
+ Status extractStatus = extractGLEErrors(gleResponse, &errors);
+ if (!extractStatus.isOK()) {
+ failedStatuses.push_back(extractStatus);
+ continue;
}
- if ( failedStatuses.empty() ) {
- return Status::OK();
+ LegacyWCResponse wcResponse;
+ wcResponse.shardHost = shardEndpoint.toString();
+ wcResponse.gleResponse = gleResponse;
+ if (errors.wcError.get()) {
+ wcResponse.errToReport = errors.wcError->getErrMessage();
}
- StringBuilder builder;
- builder << "could not enforce write concern";
+ legacyWCResponses->push_back(wcResponse);
+ }
- for ( vector<Status>::const_iterator it = failedStatuses.begin();
- it != failedStatuses.end(); ++it ) {
- const Status& failedStatus = *it;
- if ( it == failedStatuses.begin() ) {
- builder << causedBy( failedStatus.toString() );
- }
- else {
- builder << ":: and ::" << failedStatus.toString();
- }
- }
+ if (failedStatuses.empty()) {
+ return Status::OK();
+ }
+
+ StringBuilder builder;
+ builder << "could not enforce write concern";
- return Status( failedStatuses.size() == 1u ? failedStatuses.front().code() :
- ErrorCodes::MultipleErrorsOccurred,
- builder.str() );
+ for (vector<Status>::const_iterator it = failedStatuses.begin(); it != failedStatuses.end();
+ ++it) {
+ const Status& failedStatus = *it;
+ if (it == failedStatuses.begin()) {
+ builder << causedBy(failedStatus.toString());
+ } else {
+ builder << ":: and ::" << failedStatus.toString();
+ }
}
+
+ return Status(failedStatuses.size() == 1u ? failedStatuses.front().code()
+ : ErrorCodes::MultipleErrorsOccurred,
+ builder.str());
+}
}
diff --git a/src/mongo/s/write_ops/batch_downconvert.h b/src/mongo/s/write_ops/batch_downconvert.h
index e3c6cc56f6a..659e59d3a9a 100644
--- a/src/mongo/s/write_ops/batch_downconvert.h
+++ b/src/mongo/s/write_ops/batch_downconvert.h
@@ -41,50 +41,48 @@
namespace mongo {
- class MultiCommandDispatch;
+class MultiCommandDispatch;
- // Used for reporting legacy write concern responses
- struct LegacyWCResponse {
- std::string shardHost;
- BSONObj gleResponse;
- std::string errToReport;
- };
+// Used for reporting legacy write concern responses
+struct LegacyWCResponse {
+ std::string shardHost;
+ BSONObj gleResponse;
+ std::string errToReport;
+};
- /**
- * Uses GLE and the shard hosts and opTimes last written by write commands to enforce a
- * write concern across the previously used shards.
- *
- * Returns OK with the LegacyWCResponses containing only write concern error information
- * Returns !OK if there was an error getting a GLE response
- */
- Status enforceLegacyWriteConcern( MultiCommandDispatch* dispatcher,
- StringData dbName,
- const BSONObj& options,
- const HostOpTimeMap& hostOpTimes,
- std::vector<LegacyWCResponse>* wcResponses );
-
- //
- // Below exposed for testing only
- //
-
- // Helper that acts as an auto-ptr for write and wc errors
- struct GLEErrors {
- std::unique_ptr<WriteErrorDetail> writeError;
- std::unique_ptr<WCErrorDetail> wcError;
- };
+/**
+ * Uses GLE and the shard hosts and opTimes last written by write commands to enforce a
+ * write concern across the previously used shards.
+ *
+ * Returns OK with the LegacyWCResponses containing only write concern error information
+ * Returns !OK if there was an error getting a GLE response
+ */
+Status enforceLegacyWriteConcern(MultiCommandDispatch* dispatcher,
+ StringData dbName,
+ const BSONObj& options,
+ const HostOpTimeMap& hostOpTimes,
+ std::vector<LegacyWCResponse>* wcResponses);
- /**
- * Given a GLE response, extracts a write error and a write concern error for the previous
- * operation.
- *
- * Returns !OK if the GLE itself failed in an unknown way.
- */
- Status extractGLEErrors( const BSONObj& gleResponse, GLEErrors* errors );
+//
+// Below exposed for testing only
+//
- /**
- * Given a GLE response, strips out all non-write-concern related information
- */
- BSONObj stripNonWCInfo( const BSONObj& gleResponse );
+// Helper that acts as an auto-ptr for write and wc errors
+struct GLEErrors {
+ std::unique_ptr<WriteErrorDetail> writeError;
+ std::unique_ptr<WCErrorDetail> wcError;
+};
+/**
+ * Given a GLE response, extracts a write error and a write concern error for the previous
+ * operation.
+ *
+ * Returns !OK if the GLE itself failed in an unknown way.
+ */
+Status extractGLEErrors(const BSONObj& gleResponse, GLEErrors* errors);
+/**
+ * Given a GLE response, strips out all non-write-concern related information
+ */
+BSONObj stripNonWCInfo(const BSONObj& gleResponse);
}
diff --git a/src/mongo/s/write_ops/batch_downconvert_test.cpp b/src/mongo/s/write_ops/batch_downconvert_test.cpp
index 8dd4b440a39..548bfb6f732 100644
--- a/src/mongo/s/write_ops/batch_downconvert_test.cpp
+++ b/src/mongo/s/write_ops/batch_downconvert_test.cpp
@@ -38,194 +38,179 @@
namespace {
- using namespace mongo;
- using std::vector;
- using std::deque;
-
- //
- // Tests for parsing GLE responses into write errors and write concern errors for write
- // commands. These tests essentially document our expected 2.4 GLE behaviors.
- //
+using namespace mongo;
+using std::vector;
+using std::deque;
+
+//
+// Tests for parsing GLE responses into write errors and write concern errors for write
+// commands. These tests essentially document our expected 2.4 GLE behaviors.
+//
+
+TEST(GLEParsing, Empty) {
+ const BSONObj gleResponse = fromjson("{ok: 1.0, err: null}");
+
+ GLEErrors errors;
+ ASSERT_OK(extractGLEErrors(gleResponse, &errors));
+ ASSERT(!errors.writeError.get());
+ ASSERT(!errors.wcError.get());
+}
+
+TEST(GLEParsing, WriteErr) {
+ const BSONObj gleResponse = fromjson("{ok: 1.0, err: 'message', code: 1000}");
+
+ GLEErrors errors;
+ ASSERT_OK(extractGLEErrors(gleResponse, &errors));
+ ASSERT(errors.writeError.get());
+ ASSERT_EQUALS(errors.writeError->getErrMessage(), "message");
+ ASSERT_EQUALS(errors.writeError->getErrCode(), 1000);
+ ASSERT(!errors.wcError.get());
+}
+
+TEST(GLEParsing, JournalFail) {
+ const BSONObj gleResponse = fromjson("{ok: 1.0, err: null, jnote: 'message'}");
+
+ GLEErrors errors;
+ ASSERT_OK(extractGLEErrors(gleResponse, &errors));
+ ASSERT(!errors.writeError.get());
+ ASSERT(errors.wcError.get());
+ ASSERT_EQUALS(errors.wcError->getErrMessage(), "message");
+ ASSERT_EQUALS(errors.wcError->getErrCode(), ErrorCodes::WriteConcernFailed);
+}
+
+TEST(GLEParsing, ReplErr) {
+ const BSONObj gleResponse = fromjson("{ok: 1.0, err: 'norepl', wnote: 'message'}");
+
+ GLEErrors errors;
+ ASSERT_OK(extractGLEErrors(gleResponse, &errors));
+ ASSERT(!errors.writeError.get());
+ ASSERT(errors.wcError.get());
+ ASSERT_EQUALS(errors.wcError->getErrMessage(), "message");
+ ASSERT_EQUALS(errors.wcError->getErrCode(), ErrorCodes::WriteConcernFailed);
+}
+
+TEST(GLEParsing, ReplTimeoutErr) {
+ const BSONObj gleResponse =
+ fromjson("{ok: 1.0, err: 'timeout', errmsg: 'message', wtimeout: true}");
+
+ GLEErrors errors;
+ ASSERT_OK(extractGLEErrors(gleResponse, &errors));
+ ASSERT(!errors.writeError.get());
+ ASSERT(errors.wcError.get());
+ ASSERT_EQUALS(errors.wcError->getErrMessage(), "message");
+ ASSERT(errors.wcError->getErrInfo()["wtimeout"].trueValue());
+ ASSERT_EQUALS(errors.wcError->getErrCode(), ErrorCodes::WriteConcernFailed);
+}
+
+TEST(GLEParsing, GLEFail) {
+ const BSONObj gleResponse = fromjson("{ok: 0.0, err: null, errmsg: 'message', code: 1000}");
+
+ GLEErrors errors;
+ Status status = extractGLEErrors(gleResponse, &errors);
+ ASSERT_NOT_OK(status);
+ ASSERT_EQUALS(status.reason(), "message");
+ ASSERT_EQUALS(status.code(), 1000);
+}
+
+TEST(GLEParsing, GLEFailNoCode) {
+ const BSONObj gleResponse = fromjson("{ok: 0.0, err: null, errmsg: 'message'}");
+
+ GLEErrors errors;
+ Status status = extractGLEErrors(gleResponse, &errors);
+ ASSERT_NOT_OK(status);
+ ASSERT_EQUALS(status.reason(), "message");
+ ASSERT_EQUALS(status.code(), ErrorCodes::UnknownError);
+}
+
+TEST(GLEParsing, NotMasterGLEFail) {
+ // Not master code in response
+ const BSONObj gleResponse = fromjson("{ok: 0.0, err: null, errmsg: 'message', code: 10990}");
- TEST(GLEParsing, Empty) {
+ GLEErrors errors;
+ ASSERT_OK(extractGLEErrors(gleResponse, &errors));
+ ASSERT(!errors.writeError.get());
+ ASSERT(errors.wcError.get());
+ ASSERT_EQUALS(errors.wcError->getErrMessage(), "message");
+ ASSERT_EQUALS(errors.wcError->getErrCode(), 10990);
+}
+
+TEST(GLEParsing, WriteErrWithStats) {
+ const BSONObj gleResponse = fromjson("{ok: 1.0, n: 2, err: 'message', code: 1000}");
+
+ GLEErrors errors;
+ ASSERT_OK(extractGLEErrors(gleResponse, &errors));
+ ASSERT(errors.writeError.get());
+ ASSERT_EQUALS(errors.writeError->getErrMessage(), "message");
+ ASSERT_EQUALS(errors.writeError->getErrCode(), 1000);
+ ASSERT(!errors.wcError.get());
+}
+
+TEST(GLEParsing, ReplTimeoutErrWithStats) {
+ const BSONObj gleResponse = fromjson(
+ "{ok: 1.0, err: 'timeout', errmsg: 'message', wtimeout: true,"
+ " n: 1, upserted: 'abcde'}");
+
+ GLEErrors errors;
+ ASSERT_OK(extractGLEErrors(gleResponse, &errors));
+ ASSERT(!errors.writeError.get());
+ ASSERT(errors.wcError.get());
+ ASSERT_EQUALS(errors.wcError->getErrMessage(), "message");
+ ASSERT(errors.wcError->getErrInfo()["wtimeout"].trueValue());
+ ASSERT_EQUALS(errors.wcError->getErrCode(), ErrorCodes::WriteConcernFailed);
+}
+
+//
+// Tests of processing and suppressing non-WC related fields from legacy GLE responses
+//
+
+TEST(LegacyGLESuppress, Basic) {
+ const BSONObj gleResponse = fromjson("{ok: 1.0, err: null}");
+
+ BSONObj stripped = stripNonWCInfo(gleResponse);
+ ASSERT_EQUALS(stripped.nFields(), 2); // with err, ok : true
+ ASSERT(stripped["ok"].trueValue());
+}
- const BSONObj gleResponse = fromjson( "{ok: 1.0, err: null}" );
-
- GLEErrors errors;
- ASSERT_OK( extractGLEErrors( gleResponse, &errors ) );
- ASSERT( !errors.writeError.get() );
- ASSERT( !errors.wcError.get() );
- }
-
- TEST(GLEParsing, WriteErr) {
-
- const BSONObj gleResponse = fromjson( "{ok: 1.0, err: 'message', code: 1000}" );
-
- GLEErrors errors;
- ASSERT_OK( extractGLEErrors( gleResponse, &errors ) );
- ASSERT( errors.writeError.get() );
- ASSERT_EQUALS( errors.writeError->getErrMessage(), "message" );
- ASSERT_EQUALS( errors.writeError->getErrCode(), 1000 );
- ASSERT( !errors.wcError.get() );
- }
-
- TEST(GLEParsing, JournalFail) {
-
- const BSONObj gleResponse = fromjson( "{ok: 1.0, err: null, jnote: 'message'}" );
-
- GLEErrors errors;
- ASSERT_OK( extractGLEErrors( gleResponse, &errors ) );
- ASSERT( !errors.writeError.get() );
- ASSERT( errors.wcError.get() );
- ASSERT_EQUALS( errors.wcError->getErrMessage(), "message" );
- ASSERT_EQUALS( errors.wcError->getErrCode(), ErrorCodes::WriteConcernFailed );
- }
-
- TEST(GLEParsing, ReplErr) {
-
- const BSONObj gleResponse = fromjson( "{ok: 1.0, err: 'norepl', wnote: 'message'}" );
-
- GLEErrors errors;
- ASSERT_OK( extractGLEErrors( gleResponse, &errors ) );
- ASSERT( !errors.writeError.get() );
- ASSERT( errors.wcError.get() );
- ASSERT_EQUALS( errors.wcError->getErrMessage(), "message" );
- ASSERT_EQUALS( errors.wcError->getErrCode(), ErrorCodes::WriteConcernFailed );
- }
-
- TEST(GLEParsing, ReplTimeoutErr) {
-
- const BSONObj gleResponse =
- fromjson( "{ok: 1.0, err: 'timeout', errmsg: 'message', wtimeout: true}" );
-
- GLEErrors errors;
- ASSERT_OK( extractGLEErrors( gleResponse, &errors ) );
- ASSERT( !errors.writeError.get() );
- ASSERT( errors.wcError.get() );
- ASSERT_EQUALS( errors.wcError->getErrMessage(), "message" );
- ASSERT( errors.wcError->getErrInfo()["wtimeout"].trueValue() );
- ASSERT_EQUALS( errors.wcError->getErrCode(), ErrorCodes::WriteConcernFailed );
- }
-
- TEST(GLEParsing, GLEFail) {
-
- const BSONObj gleResponse =
- fromjson( "{ok: 0.0, err: null, errmsg: 'message', code: 1000}" );
-
- GLEErrors errors;
- Status status = extractGLEErrors( gleResponse, &errors );
- ASSERT_NOT_OK( status );
- ASSERT_EQUALS( status.reason(), "message" );
- ASSERT_EQUALS( status.code(), 1000 );
- }
-
- TEST(GLEParsing, GLEFailNoCode) {
+TEST(LegacyGLESuppress, BasicStats) {
+ const BSONObj gleResponse = fromjson(
+ "{ok: 0.0, err: 'message',"
+ " n: 1, nModified: 1, upserted: 'abc', updatedExisting: true}");
- const BSONObj gleResponse = fromjson( "{ok: 0.0, err: null, errmsg: 'message'}" );
-
- GLEErrors errors;
- Status status = extractGLEErrors( gleResponse, &errors );
- ASSERT_NOT_OK( status );
- ASSERT_EQUALS( status.reason(), "message" );
- ASSERT_EQUALS( status.code(), ErrorCodes::UnknownError );
- }
-
- TEST(GLEParsing, NotMasterGLEFail) {
-
- // Not master code in response
- const BSONObj gleResponse =
- fromjson( "{ok: 0.0, err: null, errmsg: 'message', code: 10990}" );
-
- GLEErrors errors;
- ASSERT_OK( extractGLEErrors( gleResponse, &errors ) );
- ASSERT( !errors.writeError.get() );
- ASSERT( errors.wcError.get() );
- ASSERT_EQUALS( errors.wcError->getErrMessage(), "message" );
- ASSERT_EQUALS( errors.wcError->getErrCode(), 10990 );
- }
-
- TEST(GLEParsing, WriteErrWithStats) {
- const BSONObj gleResponse = fromjson( "{ok: 1.0, n: 2, err: 'message', code: 1000}" );
-
- GLEErrors errors;
- ASSERT_OK( extractGLEErrors( gleResponse, &errors ) );
- ASSERT( errors.writeError.get() );
- ASSERT_EQUALS( errors.writeError->getErrMessage(), "message" );
- ASSERT_EQUALS( errors.writeError->getErrCode(), 1000 );
- ASSERT( !errors.wcError.get() );
- }
-
- TEST(GLEParsing, ReplTimeoutErrWithStats) {
- const BSONObj gleResponse =
- fromjson( "{ok: 1.0, err: 'timeout', errmsg: 'message', wtimeout: true,"
- " n: 1, upserted: 'abcde'}" );
-
- GLEErrors errors;
- ASSERT_OK( extractGLEErrors( gleResponse, &errors ) );
- ASSERT( !errors.writeError.get() );
- ASSERT( errors.wcError.get() );
- ASSERT_EQUALS( errors.wcError->getErrMessage(), "message" );
- ASSERT( errors.wcError->getErrInfo()["wtimeout"].trueValue() );
- ASSERT_EQUALS( errors.wcError->getErrCode(), ErrorCodes::WriteConcernFailed );
- }
-
- //
- // Tests of processing and suppressing non-WC related fields from legacy GLE responses
- //
-
- TEST(LegacyGLESuppress, Basic) {
-
- const BSONObj gleResponse = fromjson( "{ok: 1.0, err: null}" );
-
- BSONObj stripped = stripNonWCInfo( gleResponse );
- ASSERT_EQUALS( stripped.nFields(), 2 ); // with err, ok : true
- ASSERT( stripped["ok"].trueValue() );
- }
-
- TEST(LegacyGLESuppress, BasicStats) {
-
- const BSONObj gleResponse =
- fromjson( "{ok: 0.0, err: 'message',"
- " n: 1, nModified: 1, upserted: 'abc', updatedExisting: true}" );
-
- BSONObj stripped = stripNonWCInfo( gleResponse );
- ASSERT_EQUALS( stripped.nFields(), 1 );
- ASSERT( !stripped["ok"].trueValue() );
- }
-
- TEST(LegacyGLESuppress, ReplError) {
-
- const BSONObj gleResponse =
- fromjson( "{ok: 0.0, err: 'norepl', n: 1, wcField: true}" );
-
- BSONObj stripped = stripNonWCInfo( gleResponse );
- ASSERT_EQUALS( stripped.nFields(), 3 );
- ASSERT( !stripped["ok"].trueValue() );
- ASSERT_EQUALS( stripped["err"].str(), "norepl" );
- ASSERT( stripped["wcField"].trueValue() );
- }
-
- TEST(LegacyGLESuppress, StripCode) {
-
- const BSONObj gleResponse =
- fromjson( "{ok: 1.0, err: 'message', code: 12345}" );
-
- BSONObj stripped = stripNonWCInfo( gleResponse );
- ASSERT_EQUALS( stripped.nFields(), 2 ); // with err, ok : true
- ASSERT( stripped["ok"].trueValue() );
- }
-
- TEST(LegacyGLESuppress, TimeoutDupError24) {
-
- const BSONObj gleResponse =
- BSON( "ok" << 0.0 << "err" << "message" << "code" << 12345
- << "err" << "timeout" << "code" << 56789 << "wtimeout" << true );
-
- BSONObj stripped = stripNonWCInfo( gleResponse );
- ASSERT_EQUALS( stripped.nFields(), 4 );
- ASSERT( !stripped["ok"].trueValue() );
- ASSERT_EQUALS( stripped["err"].str(), "timeout" );
- ASSERT_EQUALS( stripped["code"].numberInt(), 56789 );
- ASSERT( stripped["wtimeout"].trueValue() );
- }
+ BSONObj stripped = stripNonWCInfo(gleResponse);
+ ASSERT_EQUALS(stripped.nFields(), 1);
+ ASSERT(!stripped["ok"].trueValue());
+}
+
+TEST(LegacyGLESuppress, ReplError) {
+ const BSONObj gleResponse = fromjson("{ok: 0.0, err: 'norepl', n: 1, wcField: true}");
+
+ BSONObj stripped = stripNonWCInfo(gleResponse);
+ ASSERT_EQUALS(stripped.nFields(), 3);
+ ASSERT(!stripped["ok"].trueValue());
+ ASSERT_EQUALS(stripped["err"].str(), "norepl");
+ ASSERT(stripped["wcField"].trueValue());
+}
+
+TEST(LegacyGLESuppress, StripCode) {
+ const BSONObj gleResponse = fromjson("{ok: 1.0, err: 'message', code: 12345}");
+
+ BSONObj stripped = stripNonWCInfo(gleResponse);
+ ASSERT_EQUALS(stripped.nFields(), 2); // with err, ok : true
+ ASSERT(stripped["ok"].trueValue());
+}
+
+TEST(LegacyGLESuppress, TimeoutDupError24) {
+ const BSONObj gleResponse = BSON("ok" << 0.0 << "err"
+ << "message"
+ << "code" << 12345 << "err"
+ << "timeout"
+ << "code" << 56789 << "wtimeout" << true);
+
+ BSONObj stripped = stripNonWCInfo(gleResponse);
+ ASSERT_EQUALS(stripped.nFields(), 4);
+ ASSERT(!stripped["ok"].trueValue());
+ ASSERT_EQUALS(stripped["err"].str(), "timeout");
+ ASSERT_EQUALS(stripped["code"].numberInt(), 56789);
+ ASSERT(stripped["wtimeout"].trueValue());
+}
}
diff --git a/src/mongo/s/write_ops/batch_upconvert.cpp b/src/mongo/s/write_ops/batch_upconvert.cpp
index ee02c17744a..6c7fcacd361 100644
--- a/src/mongo/s/write_ops/batch_upconvert.cpp
+++ b/src/mongo/s/write_ops/batch_upconvert.cpp
@@ -43,201 +43,184 @@
namespace mongo {
- using mongoutils::str::stream;
- using std::string;
- using std::unique_ptr;
- using std::vector;
-
- void msgToBatchRequests( const Message& msg, vector<BatchedCommandRequest*>* requests ) {
-
- int opType = msg.operation();
-
- unique_ptr<BatchedCommandRequest> request;
- if ( opType == dbInsert ) {
- msgToBatchInserts( msg, requests );
- }
- else if ( opType == dbUpdate ) {
- requests->push_back( msgToBatchUpdate( msg ) );
- }
- else {
- dassert( opType == dbDelete );
- requests->push_back( msgToBatchDelete( msg ) );
- }
+using mongoutils::str::stream;
+using std::string;
+using std::unique_ptr;
+using std::vector;
+
+void msgToBatchRequests(const Message& msg, vector<BatchedCommandRequest*>* requests) {
+ int opType = msg.operation();
+
+ unique_ptr<BatchedCommandRequest> request;
+ if (opType == dbInsert) {
+ msgToBatchInserts(msg, requests);
+ } else if (opType == dbUpdate) {
+ requests->push_back(msgToBatchUpdate(msg));
+ } else {
+ dassert(opType == dbDelete);
+ requests->push_back(msgToBatchDelete(msg));
}
+}
- void msgToBatchInserts( const Message& insertMsg,
- vector<BatchedCommandRequest*>* insertRequests ) {
-
- // Parsing DbMessage throws
- DbMessage dbMsg( insertMsg );
- NamespaceString nss( dbMsg.getns() );
-
- // Continue-on-error == unordered
- bool coe = dbMsg.reservedField() & Reserved_InsertOption_ContinueOnError;
- bool ordered = !coe;
-
- while ( insertRequests->empty() || dbMsg.moreJSObjs() ) {
-
- // Collect docs for next batch, but don't exceed maximum size
- int totalInsertSize = 0;
- vector<BSONObj> docs;
- do {
- const char* prevObjMark = dbMsg.markGet();
- BSONObj nextObj = dbMsg.nextJsObj();
- if ( totalInsertSize + nextObj.objsize() <= BSONObjMaxUserSize ) {
- docs.push_back( nextObj );
- totalInsertSize += docs.back().objsize();
- }
- else {
- // Size limit exceeded, rollback to previous insert position
- dbMsg.markReset( prevObjMark );
- break;
- }
+void msgToBatchInserts(const Message& insertMsg, vector<BatchedCommandRequest*>* insertRequests) {
+ // Parsing DbMessage throws
+ DbMessage dbMsg(insertMsg);
+ NamespaceString nss(dbMsg.getns());
+
+ // Continue-on-error == unordered
+ bool coe = dbMsg.reservedField() & Reserved_InsertOption_ContinueOnError;
+ bool ordered = !coe;
+
+ while (insertRequests->empty() || dbMsg.moreJSObjs()) {
+ // Collect docs for next batch, but don't exceed maximum size
+ int totalInsertSize = 0;
+ vector<BSONObj> docs;
+ do {
+ const char* prevObjMark = dbMsg.markGet();
+ BSONObj nextObj = dbMsg.nextJsObj();
+ if (totalInsertSize + nextObj.objsize() <= BSONObjMaxUserSize) {
+ docs.push_back(nextObj);
+ totalInsertSize += docs.back().objsize();
+ } else {
+ // Size limit exceeded, rollback to previous insert position
+ dbMsg.markReset(prevObjMark);
+ break;
}
- while ( docs.size() < BatchedCommandRequest::kMaxWriteBatchSize
- && dbMsg.moreJSObjs() );
+ } while (docs.size() < BatchedCommandRequest::kMaxWriteBatchSize && dbMsg.moreJSObjs());
- dassert( !docs.empty() );
-
- // No exceptions from here on
- BatchedCommandRequest* request =
- new BatchedCommandRequest( BatchedCommandRequest::BatchType_Insert );
- request->setNSS( nss );
- for ( vector<BSONObj>::const_iterator it = docs.begin(); it != docs.end(); ++it ) {
- request->getInsertRequest()->addToDocuments( *it );
- }
- request->setOrdered( ordered );
- request->setWriteConcern( WriteConcernOptions::Acknowledged );
-
- insertRequests->push_back( request );
- }
- }
-
- BatchedCommandRequest* msgToBatchUpdate( const Message& updateMsg ) {
-
- // Parsing DbMessage throws
- DbMessage dbMsg( updateMsg );
- NamespaceString nss( dbMsg.getns() );
- int flags = dbMsg.pullInt();
- bool upsert = flags & UpdateOption_Upsert;
- bool multi = flags & UpdateOption_Multi;
- const BSONObj query = dbMsg.nextJsObj();
- const BSONObj updateExpr = dbMsg.nextJsObj();
+ dassert(!docs.empty());
// No exceptions from here on
- BatchedUpdateDocument* updateDoc = new BatchedUpdateDocument;
- updateDoc->setQuery( query );
- updateDoc->setUpdateExpr( updateExpr );
- updateDoc->setUpsert( upsert );
- updateDoc->setMulti( multi );
-
BatchedCommandRequest* request =
- new BatchedCommandRequest( BatchedCommandRequest::BatchType_Update );
- request->setNSS( nss );
- request->getUpdateRequest()->addToUpdates( updateDoc );
- request->setWriteConcern( WriteConcernOptions::Acknowledged );
+ new BatchedCommandRequest(BatchedCommandRequest::BatchType_Insert);
+ request->setNSS(nss);
+ for (vector<BSONObj>::const_iterator it = docs.begin(); it != docs.end(); ++it) {
+ request->getInsertRequest()->addToDocuments(*it);
+ }
+ request->setOrdered(ordered);
+ request->setWriteConcern(WriteConcernOptions::Acknowledged);
- return request;
+ insertRequests->push_back(request);
}
+}
- BatchedCommandRequest* msgToBatchDelete( const Message& deleteMsg ) {
-
- // Parsing DbMessage throws
- DbMessage dbMsg( deleteMsg );
- NamespaceString nss( dbMsg.getns() );
- int flags = dbMsg.pullInt();
- const BSONObj query = dbMsg.nextJsObj();
- int limit = ( flags & RemoveOption_JustOne ) ? 1 : 0;
+BatchedCommandRequest* msgToBatchUpdate(const Message& updateMsg) {
+ // Parsing DbMessage throws
+ DbMessage dbMsg(updateMsg);
+ NamespaceString nss(dbMsg.getns());
+ int flags = dbMsg.pullInt();
+ bool upsert = flags & UpdateOption_Upsert;
+ bool multi = flags & UpdateOption_Multi;
+ const BSONObj query = dbMsg.nextJsObj();
+ const BSONObj updateExpr = dbMsg.nextJsObj();
+
+ // No exceptions from here on
+ BatchedUpdateDocument* updateDoc = new BatchedUpdateDocument;
+ updateDoc->setQuery(query);
+ updateDoc->setUpdateExpr(updateExpr);
+ updateDoc->setUpsert(upsert);
+ updateDoc->setMulti(multi);
+
+ BatchedCommandRequest* request =
+ new BatchedCommandRequest(BatchedCommandRequest::BatchType_Update);
+ request->setNSS(nss);
+ request->getUpdateRequest()->addToUpdates(updateDoc);
+ request->setWriteConcern(WriteConcernOptions::Acknowledged);
+
+ return request;
+}
- // No exceptions from here on
- BatchedDeleteDocument* deleteDoc = new BatchedDeleteDocument;
- deleteDoc->setLimit( limit );
- deleteDoc->setQuery( query );
+BatchedCommandRequest* msgToBatchDelete(const Message& deleteMsg) {
+ // Parsing DbMessage throws
+ DbMessage dbMsg(deleteMsg);
+ NamespaceString nss(dbMsg.getns());
+ int flags = dbMsg.pullInt();
+ const BSONObj query = dbMsg.nextJsObj();
+ int limit = (flags & RemoveOption_JustOne) ? 1 : 0;
+
+ // No exceptions from here on
+ BatchedDeleteDocument* deleteDoc = new BatchedDeleteDocument;
+ deleteDoc->setLimit(limit);
+ deleteDoc->setQuery(query);
+
+ BatchedCommandRequest* request =
+ new BatchedCommandRequest(BatchedCommandRequest::BatchType_Delete);
+ request->setNSS(nss);
+ request->getDeleteRequest()->addToDeletes(deleteDoc);
+ request->setWriteConcern(WriteConcernOptions::Acknowledged);
+
+ return request;
+}
- BatchedCommandRequest* request =
- new BatchedCommandRequest( BatchedCommandRequest::BatchType_Delete );
- request->setNSS( nss );
- request->getDeleteRequest()->addToDeletes( deleteDoc );
- request->setWriteConcern( WriteConcernOptions::Acknowledged );
+void buildErrorFromResponse(const BatchedCommandResponse& response, WriteErrorDetail* error) {
+ error->setErrCode(response.getErrCode());
+ error->setErrMessage(response.getErrMessage());
+}
- return request;
+bool batchErrorToLastError(const BatchedCommandRequest& request,
+ const BatchedCommandResponse& response,
+ LastError* error) {
+ unique_ptr<WriteErrorDetail> commandError;
+ WriteErrorDetail* lastBatchError = NULL;
+
+ if (!response.getOk()) {
+ // Command-level error, all writes failed
+
+ commandError.reset(new WriteErrorDetail);
+ buildErrorFromResponse(response, commandError.get());
+ lastBatchError = commandError.get();
+ } else if (response.isErrDetailsSet()) {
+ // The last error in the batch is always reported - this matches expected COE
+ // semantics for insert batches. For updates and deletes, error is only reported
+ // if the error was on the last item.
+
+ const bool lastOpErrored = response.getErrDetails().back()->getIndex() ==
+ static_cast<int>(request.sizeWriteOps() - 1);
+ if (request.getBatchType() == BatchedCommandRequest::BatchType_Insert || lastOpErrored) {
+ lastBatchError = response.getErrDetails().back();
+ }
+ } else {
+ // We don't care about write concern errors, these happen in legacy mode in GLE.
}
- void buildErrorFromResponse( const BatchedCommandResponse& response, WriteErrorDetail* error ) {
- error->setErrCode( response.getErrCode() );
- error->setErrMessage( response.getErrMessage() );
+ // Record an error if one exists
+ if (lastBatchError) {
+ string errMsg = lastBatchError->getErrMessage();
+ error->setLastError(lastBatchError->getErrCode(),
+ errMsg.empty() ? "see code for details" : errMsg.c_str());
+ return true;
}
- bool batchErrorToLastError( const BatchedCommandRequest& request,
- const BatchedCommandResponse& response,
- LastError* error ) {
-
- unique_ptr<WriteErrorDetail> commandError;
- WriteErrorDetail* lastBatchError = NULL;
-
- if ( !response.getOk() ) {
- // Command-level error, all writes failed
-
- commandError.reset( new WriteErrorDetail );
- buildErrorFromResponse( response, commandError.get() );
- lastBatchError = commandError.get();
- }
- else if ( response.isErrDetailsSet() ) {
- // The last error in the batch is always reported - this matches expected COE
- // semantics for insert batches. For updates and deletes, error is only reported
- // if the error was on the last item.
-
- const bool lastOpErrored = response.getErrDetails().back()->getIndex() ==
- static_cast<int>(request.sizeWriteOps() - 1);
- if ( request.getBatchType() == BatchedCommandRequest::BatchType_Insert ||
- lastOpErrored ) {
- lastBatchError = response.getErrDetails().back();
+ // Record write stats otherwise
+ // NOTE: For multi-write batches, our semantics change a little because we don't have
+ // un-aggregated "n" stats.
+ if (request.getBatchType() == BatchedCommandRequest::BatchType_Update) {
+ BSONObj upsertedId;
+ if (response.isUpsertDetailsSet()) {
+ // Only report the very last item's upserted id if applicable
+ if (response.getUpsertDetails().back()->getIndex() + 1 ==
+ static_cast<int>(request.sizeWriteOps())) {
+ upsertedId = response.getUpsertDetails().back()->getUpsertedID();
}
}
- else {
- // We don't care about write concern errors, these happen in legacy mode in GLE.
- }
- // Record an error if one exists
- if ( lastBatchError ) {
- string errMsg = lastBatchError->getErrMessage();
- error->setLastError(lastBatchError->getErrCode(),
- errMsg.empty() ? "see code for details" : errMsg.c_str());
- return true;
- }
-
- // Record write stats otherwise
- // NOTE: For multi-write batches, our semantics change a little because we don't have
- // un-aggregated "n" stats.
- if ( request.getBatchType() == BatchedCommandRequest::BatchType_Update ) {
-
- BSONObj upsertedId;
- if( response.isUpsertDetailsSet() ) {
- // Only report the very last item's upserted id if applicable
- if ( response.getUpsertDetails().back()->getIndex() + 1
- == static_cast<int>( request.sizeWriteOps() ) ) {
- upsertedId = response.getUpsertDetails().back()->getUpsertedID();
- }
- }
+ int numUpserted = 0;
+ if (response.isUpsertDetailsSet())
+ numUpserted = response.sizeUpsertDetails();
- int numUpserted = 0;
- if ( response.isUpsertDetailsSet() )
- numUpserted = response.sizeUpsertDetails();
+ int numMatched = response.getN() - numUpserted;
+ dassert(numMatched >= 0);
- int numMatched = response.getN() - numUpserted;
- dassert( numMatched >= 0 );
+ // Wrap upserted id in "upserted" field
+ BSONObj leUpsertedId;
+ if (!upsertedId.isEmpty())
+ leUpsertedId = upsertedId.firstElement().wrap(kUpsertedFieldName);
- // Wrap upserted id in "upserted" field
- BSONObj leUpsertedId;
- if ( !upsertedId.isEmpty() )
- leUpsertedId = upsertedId.firstElement().wrap( kUpsertedFieldName );
-
- error->recordUpdate( numMatched > 0, response.getN(), leUpsertedId );
- }
- else if ( request.getBatchType() == BatchedCommandRequest::BatchType_Delete ) {
- error->recordDelete( response.getN() );
- }
-
- return false;
+ error->recordUpdate(numMatched > 0, response.getN(), leUpsertedId);
+ } else if (request.getBatchType() == BatchedCommandRequest::BatchType_Delete) {
+ error->recordDelete(response.getN());
}
+
+ return false;
+}
}
diff --git a/src/mongo/s/write_ops/batch_upconvert.h b/src/mongo/s/write_ops/batch_upconvert.h
index 46b6d4552b6..a0b4712cf96 100644
--- a/src/mongo/s/write_ops/batch_upconvert.h
+++ b/src/mongo/s/write_ops/batch_upconvert.h
@@ -37,29 +37,28 @@
namespace mongo {
- //
- // Utility functions for up-converting incoming write messages into batch write requests.
- // NOTE: These functions throw on invalid message format.
- //
+//
+// Utility functions for up-converting incoming write messages into batch write requests.
+// NOTE: These functions throw on invalid message format.
+//
- void msgToBatchRequests( const Message& msg, std::vector<BatchedCommandRequest*>* requests );
+void msgToBatchRequests(const Message& msg, std::vector<BatchedCommandRequest*>* requests);
- // Batch inserts may get mapped to multiple batch requests, to avoid spilling MaxBSONObjSize
- void msgToBatchInserts( const Message& insertMsg,
- std::vector<BatchedCommandRequest*>* insertRequests );
+// Batch inserts may get mapped to multiple batch requests, to avoid spilling MaxBSONObjSize
+void msgToBatchInserts(const Message& insertMsg,
+ std::vector<BatchedCommandRequest*>* insertRequests);
- BatchedCommandRequest* msgToBatchUpdate( const Message& updateMsg );
+BatchedCommandRequest* msgToBatchUpdate(const Message& updateMsg);
- BatchedCommandRequest* msgToBatchDelete( const Message& deleteMsg );
-
- /**
- * Utility function for recording completed batch writes into the LastError object.
- * (Interpreting the response requires the request object as well.)
- *
- * Returns true if an error occurred in the batch.
- */
- bool batchErrorToLastError( const BatchedCommandRequest& request,
- const BatchedCommandResponse& response,
- LastError* error );
+BatchedCommandRequest* msgToBatchDelete(const Message& deleteMsg);
+/**
+ * Utility function for recording completed batch writes into the LastError object.
+ * (Interpreting the response requires the request object as well.)
+ *
+ * Returns true if an error occurred in the batch.
+ */
+bool batchErrorToLastError(const BatchedCommandRequest& request,
+ const BatchedCommandResponse& response,
+ LastError* error);
}
diff --git a/src/mongo/s/write_ops/batch_upconvert_test.cpp b/src/mongo/s/write_ops/batch_upconvert_test.cpp
index c802411c9e5..00d485818fe 100644
--- a/src/mongo/s/write_ops/batch_upconvert_test.cpp
+++ b/src/mongo/s/write_ops/batch_upconvert_test.cpp
@@ -33,7 +33,7 @@
#include "mongo/bson/bsonobj.h"
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/bson/util/builder.h"
-#include "mongo/client/dbclientinterface.h" // for write constants
+#include "mongo/client/dbclientinterface.h" // for write constants
#include "mongo/db/write_concern_options.h"
#include "mongo/s/write_ops/batched_command_request.h"
#include "mongo/unittest/unittest.h"
@@ -41,112 +41,109 @@
namespace {
- using std::string;
- using std::vector;
-
- using namespace mongo;
-
- TEST(WriteBatchUpconvert, BasicInsert) {
-
- // Tests that an insert message is correctly upconverted to a batch insert
-
- const string ns = "foo.bar";
- const BSONObj doc = BSON( "hello" << "world" );
-
- Message insertMsg;
- BufBuilder insertMsgB;
-
- int reservedFlags = InsertOption_ContinueOnError;
- insertMsgB.appendNum( reservedFlags );
- insertMsgB.appendStr( ns );
- doc.appendSelfToBufBuilder( insertMsgB );
- insertMsg.setData( dbInsert, insertMsgB.buf(), insertMsgB.len() );
-
- OwnedPointerVector<BatchedCommandRequest> requestsOwned;
- vector<BatchedCommandRequest*>& requests = requestsOwned.mutableVector();
- msgToBatchRequests( insertMsg, &requests );
-
- BatchedCommandRequest* request = requests.back();
- ASSERT_EQUALS( request->getBatchType(), BatchedCommandRequest::BatchType_Insert );
- string errMsg;
- ASSERT( request->isValid( &errMsg ) );
- ASSERT_EQUALS( request->getNS(), ns );
- ASSERT( !request->getOrdered() );
- ASSERT_EQUALS( request->sizeWriteOps(), 1u );
- bool isSameDoc = doc.woCompare( request->getInsertRequest()->getDocumentsAt( 0 ) ) == 0;
- ASSERT( isSameDoc );
- ASSERT( request->getWriteConcern().woCompare( WriteConcernOptions::Acknowledged ) == 0 );
- }
-
- TEST(WriteBatchUpconvert, BasicUpdate) {
-
- // Tests that an update message is correctly upconverted to a batch update
-
- const string ns = "foo.bar";
- const BSONObj query = BSON( "hello" << "world" );
- const BSONObj update = BSON( "$set" << BSON( "hello" << "world" ) );
-
- Message updateMsg;
- BufBuilder updateMsgB;
-
- int reservedFlags = 0;
- updateMsgB.appendNum( reservedFlags );
- updateMsgB.appendStr( ns );
- updateMsgB.appendNum( UpdateOption_Upsert );
- query.appendSelfToBufBuilder( updateMsgB );
- update.appendSelfToBufBuilder( updateMsgB );
- updateMsg.setData( dbUpdate, updateMsgB.buf(), updateMsgB.len() );
-
- OwnedPointerVector<BatchedCommandRequest> requestsOwned;
- vector<BatchedCommandRequest*>& requests = requestsOwned.mutableVector();
- msgToBatchRequests( updateMsg, &requests );
-
- BatchedCommandRequest* request = requests.back();
- ASSERT_EQUALS( request->getBatchType(), BatchedCommandRequest::BatchType_Update );
- string errMsg;
- ASSERT( request->isValid( &errMsg ) );
- ASSERT_EQUALS( request->getNS(), ns );
- ASSERT_EQUALS( request->sizeWriteOps(), 1u );
- ASSERT( query.woCompare(
- request->getUpdateRequest()->getUpdatesAt( 0 )->getQuery() ) == 0 );
- ASSERT( update.woCompare(
- request->getUpdateRequest()->getUpdatesAt( 0 )->getUpdateExpr() ) == 0 );
- ASSERT( request->getUpdateRequest()->getUpdatesAt( 0 )->getUpsert() );
- ASSERT( !request->getUpdateRequest()->getUpdatesAt( 0 )->getMulti() );
- ASSERT( request->getWriteConcern().woCompare( WriteConcernOptions::Acknowledged ) == 0 );
- }
-
- TEST(WriteBatchUpconvert, BasicDelete) {
-
- // Tests that an remove message is correctly upconverted to a batch delete
-
- const string ns = "foo.bar";
- const BSONObj query = BSON( "hello" << "world" );
-
- Message deleteMsg;
- BufBuilder deleteMsgB;
-
- int reservedFlags = 0;
- deleteMsgB.appendNum( reservedFlags );
- deleteMsgB.appendStr( ns );
- deleteMsgB.appendNum( RemoveOption_JustOne );
- query.appendSelfToBufBuilder( deleteMsgB );
- deleteMsg.setData( dbDelete, deleteMsgB.buf(), deleteMsgB.len() );
-
- OwnedPointerVector<BatchedCommandRequest> requestsOwned;
- vector<BatchedCommandRequest*>& requests = requestsOwned.mutableVector();
- msgToBatchRequests( deleteMsg, &requests );
-
- BatchedCommandRequest* request = requests.back();
- ASSERT_EQUALS( request->getBatchType(), BatchedCommandRequest::BatchType_Delete );
- string errMsg;
- ASSERT( request->isValid( &errMsg ) );
- ASSERT_EQUALS( request->getNS(), ns );
- ASSERT_EQUALS( request->sizeWriteOps(), 1u );
- ASSERT( query.woCompare(
- request->getDeleteRequest()->getDeletesAt( 0 )->getQuery() ) == 0 );
- ASSERT( request->getDeleteRequest()->getDeletesAt( 0 )->getLimit() == 1 );
- ASSERT( request->getWriteConcern().woCompare( WriteConcernOptions::Acknowledged ) == 0 );
- }
+using std::string;
+using std::vector;
+
+using namespace mongo;
+
+TEST(WriteBatchUpconvert, BasicInsert) {
+ // Tests that an insert message is correctly upconverted to a batch insert
+
+ const string ns = "foo.bar";
+ const BSONObj doc = BSON("hello"
+ << "world");
+
+ Message insertMsg;
+ BufBuilder insertMsgB;
+
+ int reservedFlags = InsertOption_ContinueOnError;
+ insertMsgB.appendNum(reservedFlags);
+ insertMsgB.appendStr(ns);
+ doc.appendSelfToBufBuilder(insertMsgB);
+ insertMsg.setData(dbInsert, insertMsgB.buf(), insertMsgB.len());
+
+ OwnedPointerVector<BatchedCommandRequest> requestsOwned;
+ vector<BatchedCommandRequest*>& requests = requestsOwned.mutableVector();
+ msgToBatchRequests(insertMsg, &requests);
+
+ BatchedCommandRequest* request = requests.back();
+ ASSERT_EQUALS(request->getBatchType(), BatchedCommandRequest::BatchType_Insert);
+ string errMsg;
+ ASSERT(request->isValid(&errMsg));
+ ASSERT_EQUALS(request->getNS(), ns);
+ ASSERT(!request->getOrdered());
+ ASSERT_EQUALS(request->sizeWriteOps(), 1u);
+ bool isSameDoc = doc.woCompare(request->getInsertRequest()->getDocumentsAt(0)) == 0;
+ ASSERT(isSameDoc);
+ ASSERT(request->getWriteConcern().woCompare(WriteConcernOptions::Acknowledged) == 0);
+}
+
+TEST(WriteBatchUpconvert, BasicUpdate) {
+ // Tests that an update message is correctly upconverted to a batch update
+
+ const string ns = "foo.bar";
+ const BSONObj query = BSON("hello"
+ << "world");
+ const BSONObj update = BSON("$set" << BSON("hello"
+ << "world"));
+
+ Message updateMsg;
+ BufBuilder updateMsgB;
+
+ int reservedFlags = 0;
+ updateMsgB.appendNum(reservedFlags);
+ updateMsgB.appendStr(ns);
+ updateMsgB.appendNum(UpdateOption_Upsert);
+ query.appendSelfToBufBuilder(updateMsgB);
+ update.appendSelfToBufBuilder(updateMsgB);
+ updateMsg.setData(dbUpdate, updateMsgB.buf(), updateMsgB.len());
+
+ OwnedPointerVector<BatchedCommandRequest> requestsOwned;
+ vector<BatchedCommandRequest*>& requests = requestsOwned.mutableVector();
+ msgToBatchRequests(updateMsg, &requests);
+
+ BatchedCommandRequest* request = requests.back();
+ ASSERT_EQUALS(request->getBatchType(), BatchedCommandRequest::BatchType_Update);
+ string errMsg;
+ ASSERT(request->isValid(&errMsg));
+ ASSERT_EQUALS(request->getNS(), ns);
+ ASSERT_EQUALS(request->sizeWriteOps(), 1u);
+ ASSERT(query.woCompare(request->getUpdateRequest()->getUpdatesAt(0)->getQuery()) == 0);
+ ASSERT(update.woCompare(request->getUpdateRequest()->getUpdatesAt(0)->getUpdateExpr()) == 0);
+ ASSERT(request->getUpdateRequest()->getUpdatesAt(0)->getUpsert());
+ ASSERT(!request->getUpdateRequest()->getUpdatesAt(0)->getMulti());
+ ASSERT(request->getWriteConcern().woCompare(WriteConcernOptions::Acknowledged) == 0);
+}
+TEST(WriteBatchUpconvert, BasicDelete) {
+ // Tests that an remove message is correctly upconverted to a batch delete
+
+ const string ns = "foo.bar";
+ const BSONObj query = BSON("hello"
+ << "world");
+
+ Message deleteMsg;
+ BufBuilder deleteMsgB;
+
+ int reservedFlags = 0;
+ deleteMsgB.appendNum(reservedFlags);
+ deleteMsgB.appendStr(ns);
+ deleteMsgB.appendNum(RemoveOption_JustOne);
+ query.appendSelfToBufBuilder(deleteMsgB);
+ deleteMsg.setData(dbDelete, deleteMsgB.buf(), deleteMsgB.len());
+
+ OwnedPointerVector<BatchedCommandRequest> requestsOwned;
+ vector<BatchedCommandRequest*>& requests = requestsOwned.mutableVector();
+ msgToBatchRequests(deleteMsg, &requests);
+
+ BatchedCommandRequest* request = requests.back();
+ ASSERT_EQUALS(request->getBatchType(), BatchedCommandRequest::BatchType_Delete);
+ string errMsg;
+ ASSERT(request->isValid(&errMsg));
+ ASSERT_EQUALS(request->getNS(), ns);
+ ASSERT_EQUALS(request->sizeWriteOps(), 1u);
+ ASSERT(query.woCompare(request->getDeleteRequest()->getDeletesAt(0)->getQuery()) == 0);
+ ASSERT(request->getDeleteRequest()->getDeletesAt(0)->getLimit() == 1);
+ ASSERT(request->getWriteConcern().woCompare(WriteConcernOptions::Acknowledged) == 0);
+}
}
diff --git a/src/mongo/s/write_ops/batch_write_exec.cpp b/src/mongo/s/write_ops/batch_write_exec.cpp
index 307c39321ee..1fa89c034bc 100644
--- a/src/mongo/s/write_ops/batch_write_exec.cpp
+++ b/src/mongo/s/write_ops/batch_write_exec.cpp
@@ -36,7 +36,7 @@
#include "mongo/base/owned_pointer_map.h"
#include "mongo/base/status.h"
#include "mongo/bson/util/builder.h"
-#include "mongo/client/dbclientinterface.h" // ConnectionString (header-only)
+#include "mongo/client/dbclientinterface.h" // ConnectionString (header-only)
#include "mongo/s/client/multi_command_dispatch.h"
#include "mongo/s/write_ops/batch_write_op.h"
#include "mongo/s/write_ops/write_error_detail.h"
@@ -44,354 +44,337 @@
namespace mongo {
- using std::endl;
- using std::make_pair;
- using std::stringstream;
- using std::vector;
-
- BatchWriteExec::BatchWriteExec( NSTargeter* targeter,
- ShardResolver* resolver,
- MultiCommandDispatch* dispatcher ) :
- _targeter( targeter ),
- _resolver( resolver ),
- _dispatcher( dispatcher ),
- _stats( new BatchWriteExecStats ) {
- }
+using std::endl;
+using std::make_pair;
+using std::stringstream;
+using std::vector;
- namespace {
+BatchWriteExec::BatchWriteExec(NSTargeter* targeter,
+ ShardResolver* resolver,
+ MultiCommandDispatch* dispatcher)
+ : _targeter(targeter),
+ _resolver(resolver),
+ _dispatcher(dispatcher),
+ _stats(new BatchWriteExecStats) {}
- //
- // Map which allows associating ConnectionString hosts with TargetedWriteBatches
- // This is needed since the dispatcher only returns hosts with responses.
- //
+namespace {
- // TODO: Unordered map?
- typedef OwnedPointerMap<ConnectionString, TargetedWriteBatch> OwnedHostBatchMap;
- }
+//
+// Map which allows associating ConnectionString hosts with TargetedWriteBatches
+// This is needed since the dispatcher only returns hosts with responses.
+//
- static void buildErrorFrom( const Status& status, WriteErrorDetail* error ) {
- error->setErrCode( status.code() );
- error->setErrMessage( status.reason() );
- }
+// TODO: Unordered map?
+typedef OwnedPointerMap<ConnectionString, TargetedWriteBatch> OwnedHostBatchMap;
+}
- // Helper to note several stale errors from a response
- static void noteStaleResponses( const vector<ShardError*>& staleErrors, NSTargeter* targeter ) {
- for ( vector<ShardError*>::const_iterator it = staleErrors.begin(); it != staleErrors.end();
- ++it ) {
- const ShardError* error = *it;
- targeter->noteStaleResponse( error->endpoint,
- error->error.isErrInfoSet() ? error->error.getErrInfo() :
- BSONObj() );
- }
- }
+static void buildErrorFrom(const Status& status, WriteErrorDetail* error) {
+ error->setErrCode(status.code());
+ error->setErrMessage(status.reason());
+}
- static bool isShardMetadataChanging( const vector<ShardError*>& staleErrors ) {
- if ( !staleErrors.empty() && staleErrors.back()->error.isErrInfoSet() )
- return staleErrors.back()->error.getErrInfo()["inCriticalSection"].trueValue();
- return false;
+// Helper to note several stale errors from a response
+static void noteStaleResponses(const vector<ShardError*>& staleErrors, NSTargeter* targeter) {
+ for (vector<ShardError*>::const_iterator it = staleErrors.begin(); it != staleErrors.end();
+ ++it) {
+ const ShardError* error = *it;
+ targeter->noteStaleResponse(
+ error->endpoint, error->error.isErrInfoSet() ? error->error.getErrInfo() : BSONObj());
}
+}
- // The number of times we'll try to continue a batch op if no progress is being made
- // This only applies when no writes are occurring and metadata is not changing on reload
- static const int kMaxRoundsWithoutProgress( 5 );
+static bool isShardMetadataChanging(const vector<ShardError*>& staleErrors) {
+ if (!staleErrors.empty() && staleErrors.back()->error.isErrInfoSet())
+ return staleErrors.back()->error.getErrInfo()["inCriticalSection"].trueValue();
+ return false;
+}
- void BatchWriteExec::executeBatch( const BatchedCommandRequest& clientRequest,
- BatchedCommandResponse* clientResponse ) {
+// The number of times we'll try to continue a batch op if no progress is being made
+// This only applies when no writes are occurring and metadata is not changing on reload
+static const int kMaxRoundsWithoutProgress(5);
- LOG( 4 ) << "starting execution of write batch of size "
- << static_cast<int>( clientRequest.sizeWriteOps() )
- << " for " << clientRequest.getNS() << endl;
+void BatchWriteExec::executeBatch(const BatchedCommandRequest& clientRequest,
+ BatchedCommandResponse* clientResponse) {
+ LOG(4) << "starting execution of write batch of size "
+ << static_cast<int>(clientRequest.sizeWriteOps()) << " for " << clientRequest.getNS()
+ << endl;
- BatchWriteOp batchOp;
- batchOp.initClientRequest( &clientRequest );
+ BatchWriteOp batchOp;
+ batchOp.initClientRequest(&clientRequest);
- // Current batch status
- bool refreshedTargeter = false;
- int rounds = 0;
- int numCompletedOps = 0;
- int numRoundsWithoutProgress = 0;
+ // Current batch status
+ bool refreshedTargeter = false;
+ int rounds = 0;
+ int numCompletedOps = 0;
+ int numRoundsWithoutProgress = 0;
- while ( !batchOp.isFinished() ) {
+ while (!batchOp.isFinished()) {
+ //
+ // Get child batches to send using the targeter
+ //
+ // Targeting errors can be caused by remote metadata changing (the collection could have
+ // been dropped and recreated, for example with a new shard key). If a remote metadata
+ // change occurs *before* a client sends us a batch, we need to make sure that we don't
+ // error out just because we're staler than the client - otherwise mongos will be have
+ // unpredictable behavior.
+ //
+ // (If a metadata change happens *during* or *after* a client sends us a batch, however,
+ // we make no guarantees about delivery.)
+ //
+ // For this reason, we don't record targeting errors until we've refreshed our targeting
+ // metadata at least once *after* receiving the client batch - at that point, we know:
+ //
+ // 1) our new metadata is the same as the metadata when the client sent a batch, and so
+ // targeting errors are real.
+ // OR
+ // 2) our new metadata is a newer version than when the client sent a batch, and so
+ // the metadata must have changed after the client batch was sent. We don't need to
+ // deliver in this case, since for all the client knows we may have gotten the batch
+ // exactly when the metadata changed.
+ //
- //
- // Get child batches to send using the targeter
- //
- // Targeting errors can be caused by remote metadata changing (the collection could have
- // been dropped and recreated, for example with a new shard key). If a remote metadata
- // change occurs *before* a client sends us a batch, we need to make sure that we don't
- // error out just because we're staler than the client - otherwise mongos will be have
- // unpredictable behavior.
- //
- // (If a metadata change happens *during* or *after* a client sends us a batch, however,
- // we make no guarantees about delivery.)
- //
- // For this reason, we don't record targeting errors until we've refreshed our targeting
- // metadata at least once *after* receiving the client batch - at that point, we know:
- //
- // 1) our new metadata is the same as the metadata when the client sent a batch, and so
- // targeting errors are real.
- // OR
- // 2) our new metadata is a newer version than when the client sent a batch, and so
- // the metadata must have changed after the client batch was sent. We don't need to
- // deliver in this case, since for all the client knows we may have gotten the batch
- // exactly when the metadata changed.
- //
+ OwnedPointerVector<TargetedWriteBatch> childBatchesOwned;
+ vector<TargetedWriteBatch*>& childBatches = childBatchesOwned.mutableVector();
+
+ // If we've already had a targeting error, we've refreshed the metadata once and can
+ // record target errors definitively.
+ bool recordTargetErrors = refreshedTargeter;
+ Status targetStatus = batchOp.targetBatch(*_targeter, recordTargetErrors, &childBatches);
+ if (!targetStatus.isOK()) {
+ // Don't do anything until a targeter refresh
+ _targeter->noteCouldNotTarget();
+ refreshedTargeter = true;
+ ++_stats->numTargetErrors;
+ dassert(childBatches.size() == 0u);
+ }
- OwnedPointerVector<TargetedWriteBatch> childBatchesOwned;
- vector<TargetedWriteBatch*>& childBatches = childBatchesOwned.mutableVector();
-
- // If we've already had a targeting error, we've refreshed the metadata once and can
- // record target errors definitively.
- bool recordTargetErrors = refreshedTargeter;
- Status targetStatus = batchOp.targetBatch( *_targeter,
- recordTargetErrors,
- &childBatches );
- if ( !targetStatus.isOK() ) {
- // Don't do anything until a targeter refresh
- _targeter->noteCouldNotTarget();
- refreshedTargeter = true;
- ++_stats->numTargetErrors;
- dassert( childBatches.size() == 0u );
- }
+ //
+ // Send all child batches
+ //
+
+ size_t numSent = 0;
+ size_t numToSend = childBatches.size();
+ bool remoteMetadataChanging = false;
+ while (numSent != numToSend) {
+ // Collect batches out on the network, mapped by endpoint
+ OwnedHostBatchMap ownedPendingBatches;
+ OwnedHostBatchMap::MapType& pendingBatches = ownedPendingBatches.mutableMap();
//
- // Send all child batches
+ // Send side
//
- size_t numSent = 0;
- size_t numToSend = childBatches.size();
- bool remoteMetadataChanging = false;
- while ( numSent != numToSend ) {
-
- // Collect batches out on the network, mapped by endpoint
- OwnedHostBatchMap ownedPendingBatches;
- OwnedHostBatchMap::MapType& pendingBatches = ownedPendingBatches.mutableMap();
-
+ // Get as many batches as we can at once
+ for (vector<TargetedWriteBatch*>::iterator it = childBatches.begin();
+ it != childBatches.end();
+ ++it) {
//
- // Send side
+ // Collect the info needed to dispatch our targeted batch
//
- // Get as many batches as we can at once
- for ( vector<TargetedWriteBatch*>::iterator it = childBatches.begin();
- it != childBatches.end(); ++it ) {
-
- //
- // Collect the info needed to dispatch our targeted batch
- //
-
- TargetedWriteBatch* nextBatch = *it;
- // If the batch is NULL, we sent it previously, so skip
- if ( nextBatch == NULL ) continue;
-
- // Figure out what host we need to dispatch our targeted batch
- ConnectionString shardHost;
- Status resolveStatus = _resolver->chooseWriteHost( nextBatch->getEndpoint()
- .shardName,
- &shardHost );
- if ( !resolveStatus.isOK() ) {
-
- ++_stats->numResolveErrors;
-
- // Record a resolve failure
- // TODO: It may be necessary to refresh the cache if stale, or maybe just
- // cancel and retarget the batch
- WriteErrorDetail error;
- buildErrorFrom( resolveStatus, &error );
-
- LOG( 4 ) << "unable to send write batch to " << shardHost.toString()
- << causedBy( resolveStatus.toString() ) << endl;
-
- batchOp.noteBatchError( *nextBatch, error );
-
- // We're done with this batch
- // Clean up when we can't resolve a host
- delete *it;
- *it = NULL;
- --numToSend;
- continue;
- }
-
- // If we already have a batch for this host, wait until the next time
- OwnedHostBatchMap::MapType::iterator pendingIt = pendingBatches.find( shardHost );
- if ( pendingIt != pendingBatches.end() ) continue;
-
- //
- // We now have all the info needed to dispatch the batch
- //
+ TargetedWriteBatch* nextBatch = *it;
+ // If the batch is NULL, we sent it previously, so skip
+ if (nextBatch == NULL)
+ continue;
- BatchedCommandRequest request( clientRequest.getBatchType() );
- batchOp.buildBatchRequest( *nextBatch, &request );
+ // Figure out what host we need to dispatch our targeted batch
+ ConnectionString shardHost;
+ Status resolveStatus =
+ _resolver->chooseWriteHost(nextBatch->getEndpoint().shardName, &shardHost);
+ if (!resolveStatus.isOK()) {
+ ++_stats->numResolveErrors;
- // Internally we use full namespaces for request/response, but we send the
- // command to a database with the collection name in the request.
- NamespaceString nss( request.getNS() );
- request.setNS( nss.coll() );
+ // Record a resolve failure
+ // TODO: It may be necessary to refresh the cache if stale, or maybe just
+ // cancel and retarget the batch
+ WriteErrorDetail error;
+ buildErrorFrom(resolveStatus, &error);
- LOG( 4 ) << "sending write batch to " << shardHost.toString() << ": "
- << request.toString() << endl;
+ LOG(4) << "unable to send write batch to " << shardHost.toString()
+ << causedBy(resolveStatus.toString()) << endl;
- _dispatcher->addCommand( shardHost, nss.db(), request );
+ batchOp.noteBatchError(*nextBatch, error);
- // Indicate we're done by setting the batch to NULL
- // We'll only get duplicate hostEndpoints if we have broadcast and non-broadcast
- // endpoints for the same host, so this should be pretty efficient without
- // moving stuff around.
+ // We're done with this batch
+ // Clean up when we can't resolve a host
+ delete *it;
*it = NULL;
-
- // Recv-side is responsible for cleaning up the nextBatch when used
- pendingBatches.insert( make_pair( shardHost, nextBatch ) );
+ --numToSend;
+ continue;
}
- // Send them all out
- _dispatcher->sendAll();
- numSent += pendingBatches.size();
+ // If we already have a batch for this host, wait until the next time
+ OwnedHostBatchMap::MapType::iterator pendingIt = pendingBatches.find(shardHost);
+ if (pendingIt != pendingBatches.end())
+ continue;
//
- // Recv side
+ // We now have all the info needed to dispatch the batch
//
- while ( _dispatcher->numPending() > 0 ) {
+ BatchedCommandRequest request(clientRequest.getBatchType());
+ batchOp.buildBatchRequest(*nextBatch, &request);
- // Get the response
- ConnectionString shardHost;
- BatchedCommandResponse response;
- Status dispatchStatus = _dispatcher->recvAny( &shardHost, &response );
+ // Internally we use full namespaces for request/response, but we send the
+ // command to a database with the collection name in the request.
+ NamespaceString nss(request.getNS());
+ request.setNS(nss.coll());
- // Get the TargetedWriteBatch to find where to put the response
- dassert( pendingBatches.find( shardHost ) != pendingBatches.end() );
- TargetedWriteBatch* batch = pendingBatches.find( shardHost )->second;
+ LOG(4) << "sending write batch to " << shardHost.toString() << ": "
+ << request.toString() << endl;
- if ( dispatchStatus.isOK() ) {
+ _dispatcher->addCommand(shardHost, nss.db(), request);
- TrackedErrors trackedErrors;
- trackedErrors.startTracking( ErrorCodes::StaleShardVersion );
+ // Indicate we're done by setting the batch to NULL
+ // We'll only get duplicate hostEndpoints if we have broadcast and non-broadcast
+ // endpoints for the same host, so this should be pretty efficient without
+ // moving stuff around.
+ *it = NULL;
- LOG( 4 ) << "write results received from " << shardHost.toString() << ": "
- << response.toString() << endl;
-
- // Dispatch was ok, note response
- batchOp.noteBatchResponse( *batch, response, &trackedErrors );
+ // Recv-side is responsible for cleaning up the nextBatch when used
+ pendingBatches.insert(make_pair(shardHost, nextBatch));
+ }
- // Note if anything was stale
- const vector<ShardError*>& staleErrors =
- trackedErrors.getErrors( ErrorCodes::StaleShardVersion );
+ // Send them all out
+ _dispatcher->sendAll();
+ numSent += pendingBatches.size();
- if ( staleErrors.size() > 0 ) {
- noteStaleResponses( staleErrors, _targeter );
- ++_stats->numStaleBatches;
- }
+ //
+ // Recv side
+ //
- // Remember if the shard is actively changing metadata right now
- if ( isShardMetadataChanging( staleErrors ) ) {
- remoteMetadataChanging = true;
- }
+ while (_dispatcher->numPending() > 0) {
+ // Get the response
+ ConnectionString shardHost;
+ BatchedCommandResponse response;
+ Status dispatchStatus = _dispatcher->recvAny(&shardHost, &response);
- // Remember that we successfully wrote to this shard
- // NOTE: This will record lastOps for shards where we actually didn't update
- // or delete any documents, which preserves old behavior but is conservative
- _stats->noteWriteAt( shardHost,
- response.isLastOpSet() ?
- response.getLastOp() : Timestamp(),
- response.isElectionIdSet() ?
- response.getElectionId() : OID());
- }
- else {
+ // Get the TargetedWriteBatch to find where to put the response
+ dassert(pendingBatches.find(shardHost) != pendingBatches.end());
+ TargetedWriteBatch* batch = pendingBatches.find(shardHost)->second;
- // Error occurred dispatching, note it
+ if (dispatchStatus.isOK()) {
+ TrackedErrors trackedErrors;
+ trackedErrors.startTracking(ErrorCodes::StaleShardVersion);
- stringstream msg;
- msg << "write results unavailable from " << shardHost.toString()
- << causedBy( dispatchStatus.toString() );
+ LOG(4) << "write results received from " << shardHost.toString() << ": "
+ << response.toString() << endl;
- WriteErrorDetail error;
- buildErrorFrom( Status( ErrorCodes::RemoteResultsUnavailable, msg.str() ),
- &error );
+ // Dispatch was ok, note response
+ batchOp.noteBatchResponse(*batch, response, &trackedErrors);
- LOG( 4 ) << "unable to receive write results from " << shardHost.toString()
- << causedBy( dispatchStatus.toString() ) << endl;
+ // Note if anything was stale
+ const vector<ShardError*>& staleErrors =
+ trackedErrors.getErrors(ErrorCodes::StaleShardVersion);
- batchOp.noteBatchError( *batch, error );
+ if (staleErrors.size() > 0) {
+ noteStaleResponses(staleErrors, _targeter);
+ ++_stats->numStaleBatches;
}
- }
- }
- ++rounds;
- ++_stats->numRounds;
+ // Remember if the shard is actively changing metadata right now
+ if (isShardMetadataChanging(staleErrors)) {
+ remoteMetadataChanging = true;
+ }
- // If we're done, get out
- if ( batchOp.isFinished() )
- break;
+ // Remember that we successfully wrote to this shard
+ // NOTE: This will record lastOps for shards where we actually didn't update
+ // or delete any documents, which preserves old behavior but is conservative
+ _stats->noteWriteAt(shardHost,
+ response.isLastOpSet() ? response.getLastOp() : Timestamp(),
+ response.isElectionIdSet() ? response.getElectionId()
+ : OID());
+ } else {
+ // Error occurred dispatching, note it
- // MORE WORK TO DO
+ stringstream msg;
+ msg << "write results unavailable from " << shardHost.toString()
+ << causedBy(dispatchStatus.toString());
- //
- // Refresh the targeter if we need to (no-op if nothing stale)
- //
+ WriteErrorDetail error;
+ buildErrorFrom(Status(ErrorCodes::RemoteResultsUnavailable, msg.str()), &error);
- bool targeterChanged = false;
- Status refreshStatus = _targeter->refreshIfNeeded( &targeterChanged );
+ LOG(4) << "unable to receive write results from " << shardHost.toString()
+ << causedBy(dispatchStatus.toString()) << endl;
- if ( !refreshStatus.isOK() ) {
-
- // It's okay if we can't refresh, we'll just record errors for the ops if
- // needed.
- warning() << "could not refresh targeter" << causedBy( refreshStatus.reason() )
- << endl;
+ batchOp.noteBatchError(*batch, error);
+ }
}
+ }
- //
- // Ensure progress is being made toward completing the batch op
- //
+ ++rounds;
+ ++_stats->numRounds;
- int currCompletedOps = batchOp.numWriteOpsIn( WriteOpState_Completed );
- if ( currCompletedOps == numCompletedOps && !targeterChanged
- && !remoteMetadataChanging ) {
- ++numRoundsWithoutProgress;
- }
- else {
- numRoundsWithoutProgress = 0;
- }
- numCompletedOps = currCompletedOps;
+ // If we're done, get out
+ if (batchOp.isFinished())
+ break;
- if ( numRoundsWithoutProgress > kMaxRoundsWithoutProgress ) {
+ // MORE WORK TO DO
- stringstream msg;
- msg << "no progress was made executing batch write op in " << clientRequest.getNS()
- << " after " << kMaxRoundsWithoutProgress << " rounds (" << numCompletedOps
- << " ops completed in " << rounds << " rounds total)";
+ //
+ // Refresh the targeter if we need to (no-op if nothing stale)
+ //
- WriteErrorDetail error;
- buildErrorFrom( Status( ErrorCodes::NoProgressMade, msg.str() ), &error );
- batchOp.abortBatch( error );
- break;
- }
+ bool targeterChanged = false;
+ Status refreshStatus = _targeter->refreshIfNeeded(&targeterChanged);
+
+ if (!refreshStatus.isOK()) {
+ // It's okay if we can't refresh, we'll just record errors for the ops if
+ // needed.
+ warning() << "could not refresh targeter" << causedBy(refreshStatus.reason()) << endl;
}
- batchOp.buildClientResponse( clientResponse );
+ //
+ // Ensure progress is being made toward completing the batch op
+ //
- LOG( 4 ) << "finished execution of write batch"
- << ( clientResponse->isErrDetailsSet() ? " with write errors" : "")
- << ( clientResponse->isErrDetailsSet() &&
- clientResponse->isWriteConcernErrorSet() ? " and" : "" )
- << ( clientResponse->isWriteConcernErrorSet() ? " with write concern error" : "" )
- << " for " << clientRequest.getNS() << endl;
+ int currCompletedOps = batchOp.numWriteOpsIn(WriteOpState_Completed);
+ if (currCompletedOps == numCompletedOps && !targeterChanged && !remoteMetadataChanging) {
+ ++numRoundsWithoutProgress;
+ } else {
+ numRoundsWithoutProgress = 0;
+ }
+ numCompletedOps = currCompletedOps;
+
+ if (numRoundsWithoutProgress > kMaxRoundsWithoutProgress) {
+ stringstream msg;
+ msg << "no progress was made executing batch write op in " << clientRequest.getNS()
+ << " after " << kMaxRoundsWithoutProgress << " rounds (" << numCompletedOps
+ << " ops completed in " << rounds << " rounds total)";
+
+ WriteErrorDetail error;
+ buildErrorFrom(Status(ErrorCodes::NoProgressMade, msg.str()), &error);
+ batchOp.abortBatch(error);
+ break;
+ }
}
- const BatchWriteExecStats& BatchWriteExec::getStats() {
- return *_stats;
- }
+ batchOp.buildClientResponse(clientResponse);
- BatchWriteExecStats* BatchWriteExec::releaseStats() {
- return _stats.release();
- }
+ LOG(4) << "finished execution of write batch"
+ << (clientResponse->isErrDetailsSet() ? " with write errors" : "")
+ << (clientResponse->isErrDetailsSet() && clientResponse->isWriteConcernErrorSet()
+ ? " and"
+ : "")
+ << (clientResponse->isWriteConcernErrorSet() ? " with write concern error" : "")
+ << " for " << clientRequest.getNS() << endl;
+}
- void BatchWriteExecStats::noteWriteAt(const ConnectionString& host,
- Timestamp opTime,
- const OID& electionId) {
- _writeOpTimes[host] = HostOpTime(opTime, electionId);
- }
+const BatchWriteExecStats& BatchWriteExec::getStats() {
+ return *_stats;
+}
- const HostOpTimeMap& BatchWriteExecStats::getWriteOpTimes() const {
- return _writeOpTimes;
- }
+BatchWriteExecStats* BatchWriteExec::releaseStats() {
+ return _stats.release();
+}
+
+void BatchWriteExecStats::noteWriteAt(const ConnectionString& host,
+ Timestamp opTime,
+ const OID& electionId) {
+ _writeOpTimes[host] = HostOpTime(opTime, electionId);
+}
+
+const HostOpTimeMap& BatchWriteExecStats::getWriteOpTimes() const {
+ return _writeOpTimes;
+}
}
diff --git a/src/mongo/s/write_ops/batch_write_exec.h b/src/mongo/s/write_ops/batch_write_exec.h
index b7fd6c51e43..708b9481aef 100644
--- a/src/mongo/s/write_ops/batch_write_exec.h
+++ b/src/mongo/s/write_ops/batch_write_exec.h
@@ -41,93 +41,87 @@
namespace mongo {
- class BatchWriteExecStats;
- class MultiCommandDispatch;
+class BatchWriteExecStats;
+class MultiCommandDispatch;
+
+/**
+ * The BatchWriteExec is able to execute client batch write requests, resulting in a batch
+ * response to send back to the client.
+ *
+ * There are two main interfaces the exec uses to "run" the batch:
+ *
+ * - the "targeter" used to generate child batch operations to send to particular shards
+ *
+ * - the "dispatcher" used to send child batches to several shards at once, and retrieve the
+ * results
+ *
+ * Both the targeter and dispatcher are assumed to be dedicated to this particular
+ * BatchWriteExec instance.
+ *
+ */
+class BatchWriteExec {
+ MONGO_DISALLOW_COPYING(BatchWriteExec);
+
+public:
+ BatchWriteExec(NSTargeter* targeter, ShardResolver* resolver, MultiCommandDispatch* dispatcher);
/**
- * The BatchWriteExec is able to execute client batch write requests, resulting in a batch
- * response to send back to the client.
- *
- * There are two main interfaces the exec uses to "run" the batch:
- *
- * - the "targeter" used to generate child batch operations to send to particular shards
- *
- * - the "dispatcher" used to send child batches to several shards at once, and retrieve the
- * results
- *
- * Both the targeter and dispatcher are assumed to be dedicated to this particular
- * BatchWriteExec instance.
+ * Executes a client batch write request by sending child batches to several shard
+ * endpoints, and returns a client batch write response.
*
+ * This function does not throw, any errors are reported via the clientResponse.
*/
- class BatchWriteExec {
- MONGO_DISALLOW_COPYING (BatchWriteExec);
- public:
-
- BatchWriteExec( NSTargeter* targeter,
- ShardResolver* resolver,
- MultiCommandDispatch* dispatcher );
-
- /**
- * Executes a client batch write request by sending child batches to several shard
- * endpoints, and returns a client batch write response.
- *
- * This function does not throw, any errors are reported via the clientResponse.
- */
- void executeBatch( const BatchedCommandRequest& clientRequest,
- BatchedCommandResponse* clientResponse );
-
- const BatchWriteExecStats& getStats();
-
- BatchWriteExecStats* releaseStats();
-
- private:
+ void executeBatch(const BatchedCommandRequest& clientRequest,
+ BatchedCommandResponse* clientResponse);
- // Not owned here
- NSTargeter* _targeter;
+ const BatchWriteExecStats& getStats();
- // Not owned here
- ShardResolver* _resolver;
+ BatchWriteExecStats* releaseStats();
- // Not owned here
- MultiCommandDispatch* _dispatcher;
+private:
+ // Not owned here
+ NSTargeter* _targeter;
- // Stats
- std::unique_ptr<BatchWriteExecStats> _stats;
- };
+ // Not owned here
+ ShardResolver* _resolver;
- struct HostOpTime {
- HostOpTime(Timestamp ot, OID e) : opTime(ot), electionId(e) {};
- HostOpTime() {};
- Timestamp opTime;
- OID electionId;
- };
+ // Not owned here
+ MultiCommandDispatch* _dispatcher;
- typedef std::map<ConnectionString, HostOpTime> HostOpTimeMap;
+ // Stats
+ std::unique_ptr<BatchWriteExecStats> _stats;
+};
- class BatchWriteExecStats {
- public:
+struct HostOpTime {
+ HostOpTime(Timestamp ot, OID e) : opTime(ot), electionId(e){};
+ HostOpTime(){};
+ Timestamp opTime;
+ OID electionId;
+};
- BatchWriteExecStats() :
- numRounds( 0 ), numTargetErrors( 0 ), numResolveErrors( 0 ), numStaleBatches( 0 ) {
- }
+typedef std::map<ConnectionString, HostOpTime> HostOpTimeMap;
- void noteWriteAt(const ConnectionString& host, Timestamp opTime, const OID& electionId);
+class BatchWriteExecStats {
+public:
+ BatchWriteExecStats()
+ : numRounds(0), numTargetErrors(0), numResolveErrors(0), numStaleBatches(0) {}
- const HostOpTimeMap& getWriteOpTimes() const;
+ void noteWriteAt(const ConnectionString& host, Timestamp opTime, const OID& electionId);
- // Expose via helpers if this gets more complex
+ const HostOpTimeMap& getWriteOpTimes() const;
- // Number of round trips required for the batch
- int numRounds;
- // Number of times targeting failed
- int numTargetErrors;
- // Number of times host resolution failed
- int numResolveErrors;
- // Number of stale batches
- int numStaleBatches;
+ // Expose via helpers if this gets more complex
- private:
+ // Number of round trips required for the batch
+ int numRounds;
+ // Number of times targeting failed
+ int numTargetErrors;
+ // Number of times host resolution failed
+ int numResolveErrors;
+ // Number of stale batches
+ int numStaleBatches;
- HostOpTimeMap _writeOpTimes;
- };
+private:
+ HostOpTimeMap _writeOpTimes;
+};
}
diff --git a/src/mongo/s/write_ops/batch_write_exec_test.cpp b/src/mongo/s/write_ops/batch_write_exec_test.cpp
index 3e0b2cfb9f3..21060c79983 100644
--- a/src/mongo/s/write_ops/batch_write_exec_test.cpp
+++ b/src/mongo/s/write_ops/batch_write_exec_test.cpp
@@ -39,277 +39,266 @@
namespace {
- using std::unique_ptr;
- using std::string;
- using std::vector;
+using std::unique_ptr;
+using std::string;
+using std::vector;
- using namespace mongo;
+using namespace mongo;
- /**
- * Mimics a single shard backend for a particular collection which can be initialized with a
- * set of write command results to return.
- */
- class MockSingleShardBackend {
- public:
-
- MockSingleShardBackend( const NamespaceString& nss ) {
-
- // Initialize targeting to a mock shard
- ShardEndpoint endpoint( "shard", ChunkVersion::IGNORED() );
- vector<MockRange*> mockRanges;
- mockRanges.push_back( new MockRange( endpoint,
- nss,
- BSON( "x" << MINKEY ),
- BSON( "x" << MAXKEY ) ) );
- targeter.init( mockRanges );
-
- // Get the connection string for the mock shard
- resolver.chooseWriteHost( mockRanges.front()->endpoint.shardName, &shardHost );
+/**
+ * Mimics a single shard backend for a particular collection which can be initialized with a
+ * set of write command results to return.
+ */
+class MockSingleShardBackend {
+public:
+ MockSingleShardBackend(const NamespaceString& nss) {
+ // Initialize targeting to a mock shard
+ ShardEndpoint endpoint("shard", ChunkVersion::IGNORED());
+ vector<MockRange*> mockRanges;
+ mockRanges.push_back(
+ new MockRange(endpoint, nss, BSON("x" << MINKEY), BSON("x" << MAXKEY)));
+ targeter.init(mockRanges);
+
+ // Get the connection string for the mock shard
+ resolver.chooseWriteHost(mockRanges.front()->endpoint.shardName, &shardHost);
+
+ // Executor using the mock backend
+ exec.reset(new BatchWriteExec(&targeter, &resolver, &dispatcher));
+ }
- // Executor using the mock backend
- exec.reset( new BatchWriteExec( &targeter, &resolver, &dispatcher ) );
- }
+ void setMockResults(const vector<MockWriteResult*>& results) {
+ dispatcher.init(results);
+ }
- void setMockResults( const vector<MockWriteResult*>& results ) {
- dispatcher.init( results );
- }
+ ConnectionString shardHost;
- ConnectionString shardHost;
+ MockNSTargeter targeter;
+ MockShardResolver resolver;
+ MockMultiWriteCommand dispatcher;
- MockNSTargeter targeter;
- MockShardResolver resolver;
- MockMultiWriteCommand dispatcher;
+ unique_ptr<BatchWriteExec> exec;
+};
- unique_ptr<BatchWriteExec> exec;
- };
+//
+// Tests for the BatchWriteExec
+//
+TEST(BatchWriteExecTests, SingleOp) {
//
- // Tests for the BatchWriteExec
+ // Basic execution test
//
- TEST(BatchWriteExecTests, SingleOp) {
-
- //
- // Basic execution test
- //
+ NamespaceString nss("foo.bar");
- NamespaceString nss( "foo.bar" );
+ MockSingleShardBackend backend(nss);
- MockSingleShardBackend backend( nss );
+ BatchedCommandRequest request(BatchedCommandRequest::BatchType_Insert);
+ request.setNS(nss.ns());
+ request.setOrdered(false);
+ request.setWriteConcern(BSONObj());
+ // Do single-target, single doc batch write op
+ request.getInsertRequest()->addToDocuments(BSON("x" << 1));
- BatchedCommandRequest request( BatchedCommandRequest::BatchType_Insert );
- request.setNS( nss.ns() );
- request.setOrdered( false );
- request.setWriteConcern( BSONObj() );
- // Do single-target, single doc batch write op
- request.getInsertRequest()->addToDocuments( BSON( "x" << 1 ) );
+ BatchedCommandResponse response;
+ backend.exec->executeBatch(request, &response);
+ ASSERT(response.getOk());
- BatchedCommandResponse response;
- backend.exec->executeBatch( request, &response );
- ASSERT( response.getOk() );
+ const BatchWriteExecStats& stats = backend.exec->getStats();
+ ASSERT_EQUALS(stats.numRounds, 1);
+}
- const BatchWriteExecStats& stats = backend.exec->getStats();
- ASSERT_EQUALS( stats.numRounds, 1 );
- }
+TEST(BatchWriteExecTests, SingleOpError) {
+ //
+ // Basic error test
+ //
- TEST(BatchWriteExecTests, SingleOpError) {
+ NamespaceString nss("foo.bar");
- //
- // Basic error test
- //
+ MockSingleShardBackend backend(nss);
- NamespaceString nss( "foo.bar" );
+ vector<MockWriteResult*> mockResults;
+ BatchedCommandResponse errResponse;
+ errResponse.setOk(false);
+ errResponse.setErrCode(ErrorCodes::UnknownError);
+ errResponse.setErrMessage("mock error");
+ mockResults.push_back(new MockWriteResult(backend.shardHost, errResponse));
- MockSingleShardBackend backend( nss );
+ backend.setMockResults(mockResults);
- vector<MockWriteResult*> mockResults;
- BatchedCommandResponse errResponse;
- errResponse.setOk( false );
- errResponse.setErrCode( ErrorCodes::UnknownError );
- errResponse.setErrMessage( "mock error" );
- mockResults.push_back( new MockWriteResult( backend.shardHost, errResponse ) );
+ BatchedCommandRequest request(BatchedCommandRequest::BatchType_Insert);
+ request.setNS(nss.ns());
+ request.setOrdered(false);
+ request.setWriteConcern(BSONObj());
+ // Do single-target, single doc batch write op
+ request.getInsertRequest()->addToDocuments(BSON("x" << 1));
- backend.setMockResults( mockResults );
+ BatchedCommandResponse response;
+ backend.exec->executeBatch(request, &response);
+ ASSERT(response.getOk());
+ ASSERT_EQUALS(response.getN(), 0);
+ ASSERT(response.isErrDetailsSet());
+ ASSERT_EQUALS(response.getErrDetailsAt(0)->getErrCode(), errResponse.getErrCode());
+ ASSERT(response.getErrDetailsAt(0)->getErrMessage().find(errResponse.getErrMessage()) !=
+ string::npos);
- BatchedCommandRequest request( BatchedCommandRequest::BatchType_Insert );
- request.setNS( nss.ns() );
- request.setOrdered( false );
- request.setWriteConcern( BSONObj() );
- // Do single-target, single doc batch write op
- request.getInsertRequest()->addToDocuments( BSON( "x" << 1 ) );
+ const BatchWriteExecStats& stats = backend.exec->getStats();
+ ASSERT_EQUALS(stats.numRounds, 1);
+}
- BatchedCommandResponse response;
- backend.exec->executeBatch( request, &response );
- ASSERT( response.getOk() );
- ASSERT_EQUALS( response.getN(), 0 );
- ASSERT( response.isErrDetailsSet() );
- ASSERT_EQUALS( response.getErrDetailsAt( 0 )->getErrCode(), errResponse.getErrCode() );
- ASSERT( response.getErrDetailsAt( 0 )->getErrMessage().find( errResponse.getErrMessage() )
- != string::npos );
-
- const BatchWriteExecStats& stats = backend.exec->getStats();
- ASSERT_EQUALS( stats.numRounds, 1 );
- }
+//
+// Test retryable errors
+//
+TEST(BatchWriteExecTests, StaleOp) {
//
- // Test retryable errors
+ // Retry op in exec b/c of stale config
//
- TEST(BatchWriteExecTests, StaleOp) {
-
- //
- // Retry op in exec b/c of stale config
- //
+ NamespaceString nss("foo.bar");
- NamespaceString nss( "foo.bar" );
+ // Insert request
+ BatchedCommandRequest request(BatchedCommandRequest::BatchType_Insert);
+ request.setNS(nss.ns());
+ request.setOrdered(false);
+ request.setWriteConcern(BSONObj());
+ // Do single-target, single doc batch write op
+ request.getInsertRequest()->addToDocuments(BSON("x" << 1));
- // Insert request
- BatchedCommandRequest request( BatchedCommandRequest::BatchType_Insert );
- request.setNS( nss.ns() );
- request.setOrdered( false );
- request.setWriteConcern( BSONObj() );
- // Do single-target, single doc batch write op
- request.getInsertRequest()->addToDocuments( BSON( "x" << 1 ) );
+ MockSingleShardBackend backend(nss);
- MockSingleShardBackend backend( nss );
+ vector<MockWriteResult*> mockResults;
+ WriteErrorDetail error;
+ error.setErrCode(ErrorCodes::StaleShardVersion);
+ error.setErrMessage("mock stale error");
+ mockResults.push_back(new MockWriteResult(backend.shardHost, error));
- vector<MockWriteResult*> mockResults;
- WriteErrorDetail error;
- error.setErrCode( ErrorCodes::StaleShardVersion );
- error.setErrMessage( "mock stale error" );
- mockResults.push_back( new MockWriteResult( backend.shardHost, error ) );
+ backend.setMockResults(mockResults);
- backend.setMockResults( mockResults );
+ // Execute request
+ BatchedCommandResponse response;
+ backend.exec->executeBatch(request, &response);
+ ASSERT(response.getOk());
- // Execute request
- BatchedCommandResponse response;
- backend.exec->executeBatch( request, &response );
- ASSERT( response.getOk() );
+ const BatchWriteExecStats& stats = backend.exec->getStats();
+ ASSERT_EQUALS(stats.numStaleBatches, 1);
+}
- const BatchWriteExecStats& stats = backend.exec->getStats();
- ASSERT_EQUALS( stats.numStaleBatches, 1 );
- }
-
- TEST(BatchWriteExecTests, MultiStaleOp) {
+TEST(BatchWriteExecTests, MultiStaleOp) {
+ //
+ // Retry op in exec multiple times b/c of stale config
+ //
- //
- // Retry op in exec multiple times b/c of stale config
- //
+ NamespaceString nss("foo.bar");
- NamespaceString nss( "foo.bar" );
+ // Insert request
+ BatchedCommandRequest request(BatchedCommandRequest::BatchType_Insert);
+ request.setNS(nss.ns());
+ request.setOrdered(false);
+ request.setWriteConcern(BSONObj());
+ // Do single-target, single doc batch write op
+ request.getInsertRequest()->addToDocuments(BSON("x" << 1));
- // Insert request
- BatchedCommandRequest request( BatchedCommandRequest::BatchType_Insert );
- request.setNS( nss.ns() );
- request.setOrdered( false );
- request.setWriteConcern( BSONObj() );
- // Do single-target, single doc batch write op
- request.getInsertRequest()->addToDocuments( BSON( "x" << 1 ) );
+ MockSingleShardBackend backend(nss);
- MockSingleShardBackend backend( nss );
+ vector<MockWriteResult*> mockResults;
+ WriteErrorDetail error;
+ error.setErrCode(ErrorCodes::StaleShardVersion);
+ error.setErrMessage("mock stale error");
+ for (int i = 0; i < 3; i++) {
+ mockResults.push_back(new MockWriteResult(backend.shardHost, error));
+ }
- vector<MockWriteResult*> mockResults;
- WriteErrorDetail error;
- error.setErrCode( ErrorCodes::StaleShardVersion );
- error.setErrMessage( "mock stale error" );
- for ( int i = 0; i < 3; i++ ) {
- mockResults.push_back( new MockWriteResult( backend.shardHost, error ) );
- }
+ backend.setMockResults(mockResults);
- backend.setMockResults( mockResults );
+ // Execute request
+ BatchedCommandResponse response;
+ backend.exec->executeBatch(request, &response);
+ ASSERT(response.getOk());
- // Execute request
- BatchedCommandResponse response;
- backend.exec->executeBatch( request, &response );
- ASSERT( response.getOk() );
+ const BatchWriteExecStats& stats = backend.exec->getStats();
+ ASSERT_EQUALS(stats.numStaleBatches, 3);
+}
- const BatchWriteExecStats& stats = backend.exec->getStats();
- ASSERT_EQUALS( stats.numStaleBatches, 3 );
- }
+TEST(BatchWriteExecTests, TooManyStaleOp) {
+ //
+ // Retry op in exec too many times (without refresh) b/c of stale config
+ // (The mock targeter doesn't report progress on refresh)
+ // We should report a no progress error for everything in the batch
+ //
- TEST(BatchWriteExecTests, TooManyStaleOp) {
-
- //
- // Retry op in exec too many times (without refresh) b/c of stale config
- // (The mock targeter doesn't report progress on refresh)
- // We should report a no progress error for everything in the batch
- //
-
- NamespaceString nss( "foo.bar" );
-
- // Insert request
- BatchedCommandRequest request( BatchedCommandRequest::BatchType_Insert );
- request.setNS( nss.ns() );
- request.setOrdered( false );
- request.setWriteConcern( BSONObj() );
- // Do single-target, single doc batch write ops
- request.getInsertRequest()->addToDocuments( BSON( "x" << 1 ) );
- request.getInsertRequest()->addToDocuments( BSON( "x" << 2 ) );
-
- MockSingleShardBackend backend( nss );
-
- vector<MockWriteResult*> mockResults;
- WriteErrorDetail error;
- error.setErrCode( ErrorCodes::StaleShardVersion );
- error.setErrMessage( "mock stale error" );
- for ( int i = 0; i < 10; i++ ) {
- mockResults.push_back( new MockWriteResult( backend.shardHost,
- error,
- request.sizeWriteOps() ) );
- }
-
- backend.setMockResults( mockResults );
-
- // Execute request
- BatchedCommandResponse response;
- backend.exec->executeBatch( request, &response );
- ASSERT( response.getOk() );
- ASSERT_EQUALS( response.getN(), 0 );
- ASSERT( response.isErrDetailsSet() );
- ASSERT_EQUALS( response.getErrDetailsAt( 0 )->getErrCode(), ErrorCodes::NoProgressMade );
- ASSERT_EQUALS( response.getErrDetailsAt( 1 )->getErrCode(), ErrorCodes::NoProgressMade );
+ NamespaceString nss("foo.bar");
+
+ // Insert request
+ BatchedCommandRequest request(BatchedCommandRequest::BatchType_Insert);
+ request.setNS(nss.ns());
+ request.setOrdered(false);
+ request.setWriteConcern(BSONObj());
+ // Do single-target, single doc batch write ops
+ request.getInsertRequest()->addToDocuments(BSON("x" << 1));
+ request.getInsertRequest()->addToDocuments(BSON("x" << 2));
+
+ MockSingleShardBackend backend(nss);
+
+ vector<MockWriteResult*> mockResults;
+ WriteErrorDetail error;
+ error.setErrCode(ErrorCodes::StaleShardVersion);
+ error.setErrMessage("mock stale error");
+ for (int i = 0; i < 10; i++) {
+ mockResults.push_back(
+ new MockWriteResult(backend.shardHost, error, request.sizeWriteOps()));
}
- TEST(BatchWriteExecTests, ManyStaleOpWithMigration) {
+ backend.setMockResults(mockResults);
- //
- // Retry op in exec many times b/c of stale config, but simulate remote migrations occurring
- //
+ // Execute request
+ BatchedCommandResponse response;
+ backend.exec->executeBatch(request, &response);
+ ASSERT(response.getOk());
+ ASSERT_EQUALS(response.getN(), 0);
+ ASSERT(response.isErrDetailsSet());
+ ASSERT_EQUALS(response.getErrDetailsAt(0)->getErrCode(), ErrorCodes::NoProgressMade);
+ ASSERT_EQUALS(response.getErrDetailsAt(1)->getErrCode(), ErrorCodes::NoProgressMade);
+}
- NamespaceString nss( "foo.bar" );
-
- // Insert request
- BatchedCommandRequest request( BatchedCommandRequest::BatchType_Insert );
- request.setNS( nss.ns() );
- request.setOrdered( false );
- request.setWriteConcern( BSONObj() );
- // Do single-target, single doc batch write op
- request.getInsertRequest()->addToDocuments( BSON( "x" << 1 ) );
-
- MockSingleShardBackend backend( nss );
-
- vector<MockWriteResult*> mockResults;
- WriteErrorDetail error;
- error.setErrCode( ErrorCodes::StaleShardVersion );
- error.setErrMessage( "mock stale error" );
- for ( int i = 0; i < 10; i++ ) {
- if ( i % 2 == 0 )
- error.setErrInfo( BSONObj() );
- else
- error.setErrInfo( BSON( "inCriticalSection" << true ) );
+TEST(BatchWriteExecTests, ManyStaleOpWithMigration) {
+ //
+ // Retry op in exec many times b/c of stale config, but simulate remote migrations occurring
+ //
- mockResults.push_back( new MockWriteResult( backend.shardHost, error ) );
- }
+ NamespaceString nss("foo.bar");
+
+ // Insert request
+ BatchedCommandRequest request(BatchedCommandRequest::BatchType_Insert);
+ request.setNS(nss.ns());
+ request.setOrdered(false);
+ request.setWriteConcern(BSONObj());
+ // Do single-target, single doc batch write op
+ request.getInsertRequest()->addToDocuments(BSON("x" << 1));
+
+ MockSingleShardBackend backend(nss);
+
+ vector<MockWriteResult*> mockResults;
+ WriteErrorDetail error;
+ error.setErrCode(ErrorCodes::StaleShardVersion);
+ error.setErrMessage("mock stale error");
+ for (int i = 0; i < 10; i++) {
+ if (i % 2 == 0)
+ error.setErrInfo(BSONObj());
+ else
+ error.setErrInfo(BSON("inCriticalSection" << true));
+
+ mockResults.push_back(new MockWriteResult(backend.shardHost, error));
+ }
- backend.setMockResults( mockResults );
+ backend.setMockResults(mockResults);
- // Execute request
- BatchedCommandResponse response;
- backend.exec->executeBatch( request, &response );
- ASSERT( response.getOk() );
+ // Execute request
+ BatchedCommandResponse response;
+ backend.exec->executeBatch(request, &response);
+ ASSERT(response.getOk());
- const BatchWriteExecStats& stats = backend.exec->getStats();
- ASSERT_EQUALS( stats.numStaleBatches, 10 );
- }
+ const BatchWriteExecStats& stats = backend.exec->getStats();
+ ASSERT_EQUALS(stats.numStaleBatches, 10);
+}
-} // unnamed namespace
+} // unnamed namespace
diff --git a/src/mongo/s/write_ops/batch_write_op.cpp b/src/mongo/s/write_ops/batch_write_op.cpp
index b1032410c7a..847b96ff9ee 100644
--- a/src/mongo/s/write_ops/batch_write_op.cpp
+++ b/src/mongo/s/write_ops/batch_write_op.cpp
@@ -32,889 +32,829 @@
namespace mongo {
- using std::unique_ptr;
- using std::make_pair;
- using std::set;
- using std::stringstream;
- using std::vector;
-
- /**
- * Returns a new write concern that has the copy of every field from the original
- * document but with a w set to 1. This is intended for upgrading { w: 0 } write
- * concern to { w: 1 }.
- */
- static BSONObj upgradeWriteConcern ( const BSONObj& origWriteConcern ) {
- BSONObjIterator iter( origWriteConcern );
- BSONObjBuilder newWriteConcern;
-
- while ( iter.more() ) {
- BSONElement elem( iter.next() );
-
- if ( strncmp( elem.fieldName(), "w", 2 ) == 0 ) {
- newWriteConcern.append( "w", 1 );
- }
- else {
- newWriteConcern.append( elem );
- }
- }
-
- return newWriteConcern.obj();
- }
-
- BatchWriteStats::BatchWriteStats() :
- numInserted( 0 ), numUpserted( 0 ), numMatched( 0 ), numModified( 0 ), numDeleted( 0 ) {
- }
+using std::unique_ptr;
+using std::make_pair;
+using std::set;
+using std::stringstream;
+using std::vector;
- BatchWriteOp::BatchWriteOp() :
- _clientRequest( NULL ), _writeOps( NULL ), _stats( new BatchWriteStats ) {
- }
-
- void BatchWriteOp::initClientRequest( const BatchedCommandRequest* clientRequest ) {
- dassert( clientRequest->isValid( NULL ) );
+/**
+ * Returns a new write concern that has the copy of every field from the original
+ * document but with a w set to 1. This is intended for upgrading { w: 0 } write
+ * concern to { w: 1 }.
+ */
+static BSONObj upgradeWriteConcern(const BSONObj& origWriteConcern) {
+ BSONObjIterator iter(origWriteConcern);
+ BSONObjBuilder newWriteConcern;
- size_t numWriteOps = clientRequest->sizeWriteOps();
- _writeOps = static_cast<WriteOp*>( ::operator new[]( numWriteOps * sizeof(WriteOp) ) );
+ while (iter.more()) {
+ BSONElement elem(iter.next());
- for ( size_t i = 0; i < numWriteOps; ++i ) {
- // Don't want to have to define what an empty WriteOp means, so construct in-place
- new ( &_writeOps[i] ) WriteOp( BatchItemRef( clientRequest, i ) );
+ if (strncmp(elem.fieldName(), "w", 2) == 0) {
+ newWriteConcern.append("w", 1);
+ } else {
+ newWriteConcern.append(elem);
}
-
- _clientRequest = clientRequest;
}
- // Arbitrary endpoint ordering, needed for grouping by endpoint
- static int compareEndpoints( const ShardEndpoint* endpointA, const ShardEndpoint* endpointB ) {
-
- int shardNameDiff = endpointA->shardName.compare( endpointB->shardName );
- if ( shardNameDiff != 0 ) return shardNameDiff;
-
- long shardVersionDiff = endpointA->shardVersion.toLong() - endpointB->shardVersion.toLong();
- if ( shardVersionDiff != 0 ) return shardVersionDiff;
-
- int shardEpochDiff =
- endpointA->shardVersion.epoch().compare( endpointB->shardVersion.epoch() );
- return shardEpochDiff;
- }
-
- namespace {
-
- //
- // Types for comparing shard endpoints in a map
- //
-
- struct EndpointComp {
- bool operator()( const ShardEndpoint* endpointA,
- const ShardEndpoint* endpointB ) const {
- return compareEndpoints( endpointA, endpointB ) < 0;
- }
- };
-
- typedef std::map<const ShardEndpoint*, TargetedWriteBatch*, EndpointComp> TargetedBatchMap;
-
- //
- // Types for tracking batch sizes
- //
+ return newWriteConcern.obj();
+}
- struct BatchSize {
+BatchWriteStats::BatchWriteStats()
+ : numInserted(0), numUpserted(0), numMatched(0), numModified(0), numDeleted(0) {}
- BatchSize() :
- numOps(0), sizeBytes(0) {
- }
+BatchWriteOp::BatchWriteOp() : _clientRequest(NULL), _writeOps(NULL), _stats(new BatchWriteStats) {}
- int numOps;
- int sizeBytes;
- };
+void BatchWriteOp::initClientRequest(const BatchedCommandRequest* clientRequest) {
+ dassert(clientRequest->isValid(NULL));
- typedef std::map<const ShardEndpoint*, BatchSize, EndpointComp> TargetedBatchSizeMap;
- }
+ size_t numWriteOps = clientRequest->sizeWriteOps();
+ _writeOps = static_cast<WriteOp*>(::operator new[](numWriteOps * sizeof(WriteOp)));
- static void buildTargetError( const Status& errStatus, WriteErrorDetail* details ) {
- details->setErrCode( errStatus.code() );
- details->setErrMessage( errStatus.reason() );
+ for (size_t i = 0; i < numWriteOps; ++i) {
+ // Don't want to have to define what an empty WriteOp means, so construct in-place
+ new (&_writeOps[i]) WriteOp(BatchItemRef(clientRequest, i));
}
- // Helper to determine whether a number of targeted writes require a new targeted batch
- static bool isNewBatchRequired( const vector<TargetedWrite*>& writes,
- const TargetedBatchMap& batchMap ) {
-
- for ( vector<TargetedWrite*>::const_iterator it = writes.begin(); it != writes.end();
- ++it ) {
+ _clientRequest = clientRequest;
+}
- TargetedWrite* write = *it;
- if ( batchMap.find( &write->endpoint ) == batchMap.end() ) {
- return true;
- }
- }
+// Arbitrary endpoint ordering, needed for grouping by endpoint
+static int compareEndpoints(const ShardEndpoint* endpointA, const ShardEndpoint* endpointB) {
+ int shardNameDiff = endpointA->shardName.compare(endpointB->shardName);
+ if (shardNameDiff != 0)
+ return shardNameDiff;
- return false;
- }
+ long shardVersionDiff = endpointA->shardVersion.toLong() - endpointB->shardVersion.toLong();
+ if (shardVersionDiff != 0)
+ return shardVersionDiff;
- // MAGIC NUMBERS
- // Before serializing updates/deletes, we don't know how big their fields would be, but we break
- // batches before serializing.
- // TODO: Revisit when we revisit command limits in general
- static const int kEstUpdateOverheadBytes = (BSONObjMaxInternalSize - BSONObjMaxUserSize) / 100;
- static const int kEstDeleteOverheadBytes = (BSONObjMaxInternalSize - BSONObjMaxUserSize) / 100;
+ int shardEpochDiff = endpointA->shardVersion.epoch().compare(endpointB->shardVersion.epoch());
+ return shardEpochDiff;
+}
- static int getWriteSizeBytes(const WriteOp& writeOp) {
+namespace {
- const BatchItemRef& item = writeOp.getWriteItem();
- BatchedCommandRequest::BatchType batchType = item.getOpType();
+//
+// Types for comparing shard endpoints in a map
+//
- if (batchType == BatchedCommandRequest::BatchType_Insert) {
- return item.getDocument().objsize();
- }
- else if (batchType == BatchedCommandRequest::BatchType_Update) {
- // Note: Be conservative here - it's okay if we send slightly too many batches
- int estSize = item.getUpdate()->getQuery().objsize()
- + item.getUpdate()->getUpdateExpr().objsize() + kEstUpdateOverheadBytes;
- dassert(estSize >= item.getUpdate()->toBSON().objsize());
- return estSize;
- }
- else {
- dassert( batchType == BatchedCommandRequest::BatchType_Delete );
- // Note: Be conservative here - it's okay if we send slightly too many batches
- int estSize = item.getDelete()->getQuery().objsize() + kEstDeleteOverheadBytes;
- dassert(estSize >= item.getDelete()->toBSON().objsize());
- return estSize;
- }
+struct EndpointComp {
+ bool operator()(const ShardEndpoint* endpointA, const ShardEndpoint* endpointB) const {
+ return compareEndpoints(endpointA, endpointB) < 0;
}
+};
- // Helper to determine whether a number of targeted writes require a new targeted batch
- static bool wouldMakeBatchesTooBig(const vector<TargetedWrite*>& writes,
- int writeSizeBytes,
- const TargetedBatchSizeMap& batchSizes) {
+typedef std::map<const ShardEndpoint*, TargetedWriteBatch*, EndpointComp> TargetedBatchMap;
- for (vector<TargetedWrite*>::const_iterator it = writes.begin(); it != writes.end(); ++it) {
+//
+// Types for tracking batch sizes
+//
- const TargetedWrite* write = *it;
- TargetedBatchSizeMap::const_iterator seenIt = batchSizes.find(&write->endpoint);
+struct BatchSize {
+ BatchSize() : numOps(0), sizeBytes(0) {}
- if (seenIt == batchSizes.end()) {
- // If this is the first item in the batch, it can't be too big
- continue;
- }
+ int numOps;
+ int sizeBytes;
+};
- const BatchSize& batchSize = seenIt->second;
+typedef std::map<const ShardEndpoint*, BatchSize, EndpointComp> TargetedBatchSizeMap;
+}
- if (batchSize.numOps >= static_cast<int>(BatchedCommandRequest::kMaxWriteBatchSize)) {
- // Too many items in batch
- return true;
- }
+static void buildTargetError(const Status& errStatus, WriteErrorDetail* details) {
+ details->setErrCode(errStatus.code());
+ details->setErrMessage(errStatus.reason());
+}
- if (batchSize.sizeBytes + writeSizeBytes > BSONObjMaxUserSize) {
- // Batch would be too big
- return true;
- }
+// Helper to determine whether a number of targeted writes require a new targeted batch
+static bool isNewBatchRequired(const vector<TargetedWrite*>& writes,
+ const TargetedBatchMap& batchMap) {
+ for (vector<TargetedWrite*>::const_iterator it = writes.begin(); it != writes.end(); ++it) {
+ TargetedWrite* write = *it;
+ if (batchMap.find(&write->endpoint) == batchMap.end()) {
+ return true;
}
-
- return false;
}
- // Helper function to cancel all the write ops of targeted batches in a map
- static void cancelBatches( const WriteErrorDetail& why,
- WriteOp* writeOps,
- TargetedBatchMap* batchMap ) {
-
- set<WriteOp*> targetedWriteOps;
+ return false;
+}
- // Collect all the writeOps that are currently targeted
- for ( TargetedBatchMap::iterator it = batchMap->begin(); it != batchMap->end(); ) {
+// MAGIC NUMBERS
+// Before serializing updates/deletes, we don't know how big their fields would be, but we break
+// batches before serializing.
+// TODO: Revisit when we revisit command limits in general
+static const int kEstUpdateOverheadBytes = (BSONObjMaxInternalSize - BSONObjMaxUserSize) / 100;
+static const int kEstDeleteOverheadBytes = (BSONObjMaxInternalSize - BSONObjMaxUserSize) / 100;
+
+static int getWriteSizeBytes(const WriteOp& writeOp) {
+ const BatchItemRef& item = writeOp.getWriteItem();
+ BatchedCommandRequest::BatchType batchType = item.getOpType();
+
+ if (batchType == BatchedCommandRequest::BatchType_Insert) {
+ return item.getDocument().objsize();
+ } else if (batchType == BatchedCommandRequest::BatchType_Update) {
+ // Note: Be conservative here - it's okay if we send slightly too many batches
+ int estSize = item.getUpdate()->getQuery().objsize() +
+ item.getUpdate()->getUpdateExpr().objsize() + kEstUpdateOverheadBytes;
+ dassert(estSize >= item.getUpdate()->toBSON().objsize());
+ return estSize;
+ } else {
+ dassert(batchType == BatchedCommandRequest::BatchType_Delete);
+ // Note: Be conservative here - it's okay if we send slightly too many batches
+ int estSize = item.getDelete()->getQuery().objsize() + kEstDeleteOverheadBytes;
+ dassert(estSize >= item.getDelete()->toBSON().objsize());
+ return estSize;
+ }
+}
- TargetedWriteBatch* batch = it->second;
- const vector<TargetedWrite*>& writes = batch->getWrites();
+// Helper to determine whether a number of targeted writes require a new targeted batch
+static bool wouldMakeBatchesTooBig(const vector<TargetedWrite*>& writes,
+ int writeSizeBytes,
+ const TargetedBatchSizeMap& batchSizes) {
+ for (vector<TargetedWrite*>::const_iterator it = writes.begin(); it != writes.end(); ++it) {
+ const TargetedWrite* write = *it;
+ TargetedBatchSizeMap::const_iterator seenIt = batchSizes.find(&write->endpoint);
- for ( vector<TargetedWrite*>::const_iterator writeIt = writes.begin();
- writeIt != writes.end(); ++writeIt ) {
+ if (seenIt == batchSizes.end()) {
+ // If this is the first item in the batch, it can't be too big
+ continue;
+ }
- TargetedWrite* write = *writeIt;
+ const BatchSize& batchSize = seenIt->second;
- // NOTE: We may repeatedly cancel a write op here, but that's fast and we want to
- // cancel before erasing the TargetedWrite* (which owns the cancelled targeting
- // info) for reporting reasons.
- writeOps[write->writeOpRef.first].cancelWrites( &why );
- }
+ if (batchSize.numOps >= static_cast<int>(BatchedCommandRequest::kMaxWriteBatchSize)) {
+ // Too many items in batch
+ return true;
+ }
- // Note that we need to *erase* first, *then* delete, since the map keys are ptrs from
- // the values
- batchMap->erase( it++ );
- delete batch;
+ if (batchSize.sizeBytes + writeSizeBytes > BSONObjMaxUserSize) {
+ // Batch would be too big
+ return true;
}
- batchMap->clear();
}
- Status BatchWriteOp::targetBatch( const NSTargeter& targeter,
- bool recordTargetErrors,
- vector<TargetedWriteBatch*>* targetedBatches ) {
-
- //
- // Targeting of unordered batches is fairly simple - each remaining write op is targeted,
- // and each of those targeted writes are grouped into a batch for a particular shard
- // endpoint.
- //
- // Targeting of ordered batches is a bit more complex - to respect the ordering of the
- // batch, we can only send:
- // A) a single targeted batch to one shard endpoint
- // B) multiple targeted batches, but only containing targeted writes for a single write op
- //
- // This means that any multi-shard write operation must be targeted and sent one-by-one.
- // Subsequent single-shard write operations can be batched together if they go to the same
- // place.
- //
- // Ex: ShardA : { skey : a->k }, ShardB : { skey : k->z }
- //
- // Ordered insert batch of: [{ skey : a }, { skey : b }, { skey : x }]
- // broken into:
- // [{ skey : a }, { skey : b }],
- // [{ skey : x }]
- //
- // Ordered update Batch of :
- // [{ skey : a }{ $push },
- // { skey : b }{ $push },
- // { skey : [c, x] }{ $push },
- // { skey : y }{ $push },
- // { skey : z }{ $push }]
- // broken into:
- // [{ skey : a }, { skey : b }],
- // [{ skey : [c,x] }],
- // [{ skey : y }, { skey : z }]
- //
-
- const bool ordered = _clientRequest->getOrdered();
-
- TargetedBatchMap batchMap;
- TargetedBatchSizeMap batchSizes;
+ return false;
+}
- int numTargetErrors = 0;
+// Helper function to cancel all the write ops of targeted batches in a map
+static void cancelBatches(const WriteErrorDetail& why,
+ WriteOp* writeOps,
+ TargetedBatchMap* batchMap) {
+ set<WriteOp*> targetedWriteOps;
- size_t numWriteOps = _clientRequest->sizeWriteOps();
- for ( size_t i = 0; i < numWriteOps; ++i ) {
+ // Collect all the writeOps that are currently targeted
+ for (TargetedBatchMap::iterator it = batchMap->begin(); it != batchMap->end();) {
+ TargetedWriteBatch* batch = it->second;
+ const vector<TargetedWrite*>& writes = batch->getWrites();
- WriteOp& writeOp = _writeOps[i];
+ for (vector<TargetedWrite*>::const_iterator writeIt = writes.begin();
+ writeIt != writes.end();
+ ++writeIt) {
+ TargetedWrite* write = *writeIt;
- // Only target _Ready ops
- if ( writeOp.getWriteState() != WriteOpState_Ready ) continue;
+ // NOTE: We may repeatedly cancel a write op here, but that's fast and we want to
+ // cancel before erasing the TargetedWrite* (which owns the cancelled targeting
+ // info) for reporting reasons.
+ writeOps[write->writeOpRef.first].cancelWrites(&why);
+ }
- //
- // Get TargetedWrites from the targeter for the write operation
- //
+ // Note that we need to *erase* first, *then* delete, since the map keys are ptrs from
+ // the values
+ batchMap->erase(it++);
+ delete batch;
+ }
+ batchMap->clear();
+}
- // TargetedWrites need to be owned once returned
- OwnedPointerVector<TargetedWrite> writesOwned;
- vector<TargetedWrite*>& writes = writesOwned.mutableVector();
+Status BatchWriteOp::targetBatch(const NSTargeter& targeter,
+ bool recordTargetErrors,
+ vector<TargetedWriteBatch*>* targetedBatches) {
+ //
+ // Targeting of unordered batches is fairly simple - each remaining write op is targeted,
+ // and each of those targeted writes are grouped into a batch for a particular shard
+ // endpoint.
+ //
+ // Targeting of ordered batches is a bit more complex - to respect the ordering of the
+ // batch, we can only send:
+ // A) a single targeted batch to one shard endpoint
+ // B) multiple targeted batches, but only containing targeted writes for a single write op
+ //
+ // This means that any multi-shard write operation must be targeted and sent one-by-one.
+ // Subsequent single-shard write operations can be batched together if they go to the same
+ // place.
+ //
+ // Ex: ShardA : { skey : a->k }, ShardB : { skey : k->z }
+ //
+ // Ordered insert batch of: [{ skey : a }, { skey : b }, { skey : x }]
+ // broken into:
+ // [{ skey : a }, { skey : b }],
+ // [{ skey : x }]
+ //
+ // Ordered update Batch of :
+ // [{ skey : a }{ $push },
+ // { skey : b }{ $push },
+ // { skey : [c, x] }{ $push },
+ // { skey : y }{ $push },
+ // { skey : z }{ $push }]
+ // broken into:
+ // [{ skey : a }, { skey : b }],
+ // [{ skey : [c,x] }],
+ // [{ skey : y }, { skey : z }]
+ //
- Status targetStatus = writeOp.targetWrites( targeter, &writes );
+ const bool ordered = _clientRequest->getOrdered();
- if ( !targetStatus.isOK() ) {
+ TargetedBatchMap batchMap;
+ TargetedBatchSizeMap batchSizes;
- WriteErrorDetail targetError;
- buildTargetError( targetStatus, &targetError );
+ int numTargetErrors = 0;
- if ( !recordTargetErrors ) {
+ size_t numWriteOps = _clientRequest->sizeWriteOps();
+ for (size_t i = 0; i < numWriteOps; ++i) {
+ WriteOp& writeOp = _writeOps[i];
- // Cancel current batch state with an error
+ // Only target _Ready ops
+ if (writeOp.getWriteState() != WriteOpState_Ready)
+ continue;
- cancelBatches( targetError, _writeOps, &batchMap );
- dassert( batchMap.empty() );
- return targetStatus;
- }
- else if ( !ordered || batchMap.empty() ) {
+ //
+ // Get TargetedWrites from the targeter for the write operation
+ //
- // Record an error for this batch
+ // TargetedWrites need to be owned once returned
+ OwnedPointerVector<TargetedWrite> writesOwned;
+ vector<TargetedWrite*>& writes = writesOwned.mutableVector();
- writeOp.setOpError( targetError );
- ++numTargetErrors;
+ Status targetStatus = writeOp.targetWrites(targeter, &writes);
- if ( ordered )
- return Status::OK();
+ if (!targetStatus.isOK()) {
+ WriteErrorDetail targetError;
+ buildTargetError(targetStatus, &targetError);
- continue;
- }
- else {
- dassert( ordered && !batchMap.empty() );
+ if (!recordTargetErrors) {
+ // Cancel current batch state with an error
- // Send out what we have, but don't record an error yet, since there may be an
- // error in the writes before this point.
+ cancelBatches(targetError, _writeOps, &batchMap);
+ dassert(batchMap.empty());
+ return targetStatus;
+ } else if (!ordered || batchMap.empty()) {
+ // Record an error for this batch
- writeOp.cancelWrites( &targetError );
- break;
- }
- }
+ writeOp.setOpError(targetError);
+ ++numTargetErrors;
- //
- // If ordered and we have a previous endpoint, make sure we don't need to send these
- // targeted writes to any other endpoints.
- //
+ if (ordered)
+ return Status::OK();
- if ( ordered && !batchMap.empty() ) {
+ continue;
+ } else {
+ dassert(ordered && !batchMap.empty());
- dassert( batchMap.size() == 1u );
- if ( isNewBatchRequired( writes, batchMap ) ) {
+ // Send out what we have, but don't record an error yet, since there may be an
+ // error in the writes before this point.
- writeOp.cancelWrites( NULL );
- break;
- }
+ writeOp.cancelWrites(&targetError);
+ break;
}
+ }
- //
- // If this write will push us over some sort of size limit, stop targeting
- //
+ //
+ // If ordered and we have a previous endpoint, make sure we don't need to send these
+ // targeted writes to any other endpoints.
+ //
- int writeSizeBytes = getWriteSizeBytes(writeOp);
- if (wouldMakeBatchesTooBig(writes, writeSizeBytes, batchSizes)) {
- invariant(!batchMap.empty());
+ if (ordered && !batchMap.empty()) {
+ dassert(batchMap.size() == 1u);
+ if (isNewBatchRequired(writes, batchMap)) {
writeOp.cancelWrites(NULL);
break;
}
-
- //
- // Targeting went ok, add to appropriate TargetedBatch
- //
-
- for ( vector<TargetedWrite*>::iterator it = writes.begin(); it != writes.end(); ++it ) {
-
- TargetedWrite* write = *it;
-
- TargetedBatchMap::iterator batchIt = batchMap.find( &write->endpoint );
- TargetedBatchSizeMap::iterator batchSizeIt = batchSizes.find( &write->endpoint );
-
- if ( batchIt == batchMap.end() ) {
- TargetedWriteBatch* newBatch = new TargetedWriteBatch( write->endpoint );
- batchIt = batchMap.insert( make_pair( &newBatch->getEndpoint(),
- newBatch ) ).first;
- batchSizeIt = batchSizes.insert(make_pair(&newBatch->getEndpoint(),
- BatchSize())).first;
- }
-
- TargetedWriteBatch* batch = batchIt->second;
- BatchSize& batchSize = batchSizeIt->second;
-
- ++batchSize.numOps;
- batchSize.sizeBytes += writeSizeBytes;
- batch->addWrite( write );
- }
-
- // Relinquish ownership of TargetedWrites, now the TargetedBatches own them
- writesOwned.mutableVector().clear();
-
- //
- // Break if we're ordered and we have more than one endpoint - later writes cannot be
- // enforced as ordered across multiple shard endpoints.
- //
-
- if ( ordered && batchMap.size() > 1u )
- break;
}
//
- // Send back our targeted batches
+ // If this write will push us over some sort of size limit, stop targeting
//
- for ( TargetedBatchMap::iterator it = batchMap.begin(); it != batchMap.end(); ++it ) {
-
- TargetedWriteBatch* batch = it->second;
-
- if ( batch->getWrites().empty() )
- continue;
-
- // Remember targeted batch for reporting
- _targeted.insert( batch );
- // Send the handle back to caller
- targetedBatches->push_back( batch );
+ int writeSizeBytes = getWriteSizeBytes(writeOp);
+ if (wouldMakeBatchesTooBig(writes, writeSizeBytes, batchSizes)) {
+ invariant(!batchMap.empty());
+ writeOp.cancelWrites(NULL);
+ break;
}
- return Status::OK();
- }
-
- void BatchWriteOp::buildBatchRequest( const TargetedWriteBatch& targetedBatch,
- BatchedCommandRequest* request ) const {
-
- request->setNS( _clientRequest->getNS() );
- request->setShouldBypassValidation(_clientRequest->shouldBypassValidation());
-
- const vector<TargetedWrite*>& targetedWrites = targetedBatch.getWrites();
+ //
+ // Targeting went ok, add to appropriate TargetedBatch
+ //
- for ( vector<TargetedWrite*>::const_iterator it = targetedWrites.begin();
- it != targetedWrites.end(); ++it ) {
+ for (vector<TargetedWrite*>::iterator it = writes.begin(); it != writes.end(); ++it) {
+ TargetedWrite* write = *it;
- const WriteOpRef& writeOpRef = ( *it )->writeOpRef;
- BatchedCommandRequest::BatchType batchType = _clientRequest->getBatchType();
+ TargetedBatchMap::iterator batchIt = batchMap.find(&write->endpoint);
+ TargetedBatchSizeMap::iterator batchSizeIt = batchSizes.find(&write->endpoint);
- // NOTE: We copy the batch items themselves here from the client request
- // TODO: This could be inefficient, maybe we want to just reference in the future
- if ( batchType == BatchedCommandRequest::BatchType_Insert ) {
- BatchedInsertRequest* clientInsertRequest = _clientRequest->getInsertRequest();
- BSONObj insertDoc = clientInsertRequest->getDocumentsAt( writeOpRef.first );
- request->getInsertRequest()->addToDocuments( insertDoc );
- }
- else if ( batchType == BatchedCommandRequest::BatchType_Update ) {
- BatchedUpdateRequest* clientUpdateRequest = _clientRequest->getUpdateRequest();
- BatchedUpdateDocument* updateDoc = new BatchedUpdateDocument;
- clientUpdateRequest->getUpdatesAt( writeOpRef.first )->cloneTo( updateDoc );
- request->getUpdateRequest()->addToUpdates( updateDoc );
- }
- else {
- dassert( batchType == BatchedCommandRequest::BatchType_Delete );
- BatchedDeleteRequest* clientDeleteRequest = _clientRequest->getDeleteRequest();
- BatchedDeleteDocument* deleteDoc = new BatchedDeleteDocument;
- clientDeleteRequest->getDeletesAt( writeOpRef.first )->cloneTo( deleteDoc );
- request->getDeleteRequest()->addToDeletes( deleteDoc );
+ if (batchIt == batchMap.end()) {
+ TargetedWriteBatch* newBatch = new TargetedWriteBatch(write->endpoint);
+ batchIt = batchMap.insert(make_pair(&newBatch->getEndpoint(), newBatch)).first;
+ batchSizeIt =
+ batchSizes.insert(make_pair(&newBatch->getEndpoint(), BatchSize())).first;
}
- // TODO: We can add logic here to allow aborting individual ops
- //if ( NULL == response ) {
- // ->responses.erase( it++ );
- // continue;
- //}
- }
+ TargetedWriteBatch* batch = batchIt->second;
+ BatchSize& batchSize = batchSizeIt->second;
- if ( _clientRequest->isWriteConcernSet() ) {
- if ( _clientRequest->isVerboseWC() ) {
- request->setWriteConcern( _clientRequest->getWriteConcern() );
- }
- else {
- // Mongos needs to send to the shard with w > 0 so it will be able to
- // see the writeErrors.
- request->setWriteConcern( upgradeWriteConcern(
- _clientRequest->getWriteConcern() ));
- }
+ ++batchSize.numOps;
+ batchSize.sizeBytes += writeSizeBytes;
+ batch->addWrite(write);
}
- if ( !request->isOrderedSet() ) {
- request->setOrdered( _clientRequest->getOrdered() );
- }
+ // Relinquish ownership of TargetedWrites, now the TargetedBatches own them
+ writesOwned.mutableVector().clear();
+
+ //
+ // Break if we're ordered and we have more than one endpoint - later writes cannot be
+ // enforced as ordered across multiple shard endpoints.
+ //
- unique_ptr<BatchedRequestMetadata> requestMetadata( new BatchedRequestMetadata() );
- requestMetadata->setShardName( targetedBatch.getEndpoint().shardName );
- requestMetadata->setShardVersion( targetedBatch.getEndpoint().shardVersion );
- requestMetadata->setSession( 0 );
- request->setMetadata( requestMetadata.release() );
+ if (ordered && batchMap.size() > 1u)
+ break;
}
//
- // Helpers for manipulating batch responses
+ // Send back our targeted batches
//
- namespace {
- struct WriteErrorDetailComp {
- bool operator()( const WriteErrorDetail* errorA,
- const WriteErrorDetail* errorB ) const {
- return errorA->getIndex() < errorB->getIndex();
- }
- };
- }
+ for (TargetedBatchMap::iterator it = batchMap.begin(); it != batchMap.end(); ++it) {
+ TargetedWriteBatch* batch = it->second;
+
+ if (batch->getWrites().empty())
+ continue;
- static void cloneCommandErrorTo( const BatchedCommandResponse& batchResp,
- WriteErrorDetail* details ) {
- details->setErrCode( batchResp.getErrCode() );
- details->setErrMessage( batchResp.getErrMessage() );
+ // Remember targeted batch for reporting
+ _targeted.insert(batch);
+ // Send the handle back to caller
+ targetedBatches->push_back(batch);
}
- // Given *either* a batch error or an array of per-item errors, copies errors we're interested
- // in into a TrackedErrorMap
- static void trackErrors( const ShardEndpoint& endpoint,
- const vector<WriteErrorDetail*> itemErrors,
- TrackedErrors* trackedErrors ) {
- for ( vector<WriteErrorDetail*>::const_iterator it = itemErrors.begin();
- it != itemErrors.end(); ++it ) {
+ return Status::OK();
+}
- const WriteErrorDetail* error = *it;
+void BatchWriteOp::buildBatchRequest(const TargetedWriteBatch& targetedBatch,
+ BatchedCommandRequest* request) const {
+ request->setNS(_clientRequest->getNS());
+ request->setShouldBypassValidation(_clientRequest->shouldBypassValidation());
- if ( trackedErrors->isTracking( error->getErrCode() ) ) {
- trackedErrors->addError( new ShardError( endpoint, *error ) );
- }
- }
- }
+ const vector<TargetedWrite*>& targetedWrites = targetedBatch.getWrites();
- static void incBatchStats( BatchedCommandRequest::BatchType batchType,
- const BatchedCommandResponse& response,
- BatchWriteStats* stats ) {
+ for (vector<TargetedWrite*>::const_iterator it = targetedWrites.begin();
+ it != targetedWrites.end();
+ ++it) {
+ const WriteOpRef& writeOpRef = (*it)->writeOpRef;
+ BatchedCommandRequest::BatchType batchType = _clientRequest->getBatchType();
- if ( batchType == BatchedCommandRequest::BatchType_Insert) {
- stats->numInserted += response.getN();
- }
- else if ( batchType == BatchedCommandRequest::BatchType_Update ) {
- int numUpserted = 0;
- if( response.isUpsertDetailsSet() ) {
- numUpserted = response.sizeUpsertDetails();
- }
- stats->numMatched += ( response.getN() - numUpserted );
- long long numModified = response.getNModified();
+ // NOTE: We copy the batch items themselves here from the client request
+ // TODO: This could be inefficient, maybe we want to just reference in the future
+ if (batchType == BatchedCommandRequest::BatchType_Insert) {
+ BatchedInsertRequest* clientInsertRequest = _clientRequest->getInsertRequest();
+ BSONObj insertDoc = clientInsertRequest->getDocumentsAt(writeOpRef.first);
+ request->getInsertRequest()->addToDocuments(insertDoc);
+ } else if (batchType == BatchedCommandRequest::BatchType_Update) {
+ BatchedUpdateRequest* clientUpdateRequest = _clientRequest->getUpdateRequest();
+ BatchedUpdateDocument* updateDoc = new BatchedUpdateDocument;
+ clientUpdateRequest->getUpdatesAt(writeOpRef.first)->cloneTo(updateDoc);
+ request->getUpdateRequest()->addToUpdates(updateDoc);
+ } else {
+ dassert(batchType == BatchedCommandRequest::BatchType_Delete);
+ BatchedDeleteRequest* clientDeleteRequest = _clientRequest->getDeleteRequest();
+ BatchedDeleteDocument* deleteDoc = new BatchedDeleteDocument;
+ clientDeleteRequest->getDeletesAt(writeOpRef.first)->cloneTo(deleteDoc);
+ request->getDeleteRequest()->addToDeletes(deleteDoc);
+ }
+
+ // TODO: We can add logic here to allow aborting individual ops
+ // if ( NULL == response ) {
+ // ->responses.erase( it++ );
+ // continue;
+ //}
+ }
+
+ if (_clientRequest->isWriteConcernSet()) {
+ if (_clientRequest->isVerboseWC()) {
+ request->setWriteConcern(_clientRequest->getWriteConcern());
+ } else {
+ // Mongos needs to send to the shard with w > 0 so it will be able to
+ // see the writeErrors.
+ request->setWriteConcern(upgradeWriteConcern(_clientRequest->getWriteConcern()));
+ }
+ }
+
+ if (!request->isOrderedSet()) {
+ request->setOrdered(_clientRequest->getOrdered());
+ }
+
+ unique_ptr<BatchedRequestMetadata> requestMetadata(new BatchedRequestMetadata());
+ requestMetadata->setShardName(targetedBatch.getEndpoint().shardName);
+ requestMetadata->setShardVersion(targetedBatch.getEndpoint().shardVersion);
+ requestMetadata->setSession(0);
+ request->setMetadata(requestMetadata.release());
+}
- if (numModified >= 0)
- stats->numModified += numModified;
- else
- stats->numModified = -1; // sentinel used to indicate we omit the field downstream
+//
+// Helpers for manipulating batch responses
+//
- stats->numUpserted += numUpserted;
- }
- else {
- dassert( batchType == BatchedCommandRequest::BatchType_Delete );
- stats->numDeleted += response.getN();
- }
+namespace {
+struct WriteErrorDetailComp {
+ bool operator()(const WriteErrorDetail* errorA, const WriteErrorDetail* errorB) const {
+ return errorA->getIndex() < errorB->getIndex();
}
+};
+}
- void BatchWriteOp::noteBatchResponse( const TargetedWriteBatch& targetedBatch,
- const BatchedCommandResponse& response,
- TrackedErrors* trackedErrors ) {
-
- if ( !response.getOk() ) {
+static void cloneCommandErrorTo(const BatchedCommandResponse& batchResp,
+ WriteErrorDetail* details) {
+ details->setErrCode(batchResp.getErrCode());
+ details->setErrMessage(batchResp.getErrMessage());
+}
- WriteErrorDetail error;
- cloneCommandErrorTo( response, &error );
+// Given *either* a batch error or an array of per-item errors, copies errors we're interested
+// in into a TrackedErrorMap
+static void trackErrors(const ShardEndpoint& endpoint,
+ const vector<WriteErrorDetail*> itemErrors,
+ TrackedErrors* trackedErrors) {
+ for (vector<WriteErrorDetail*>::const_iterator it = itemErrors.begin(); it != itemErrors.end();
+ ++it) {
+ const WriteErrorDetail* error = *it;
- // Treat command errors exactly like other failures of the batch
- // Note that no errors will be tracked from these failures - as-designed
- noteBatchError( targetedBatch, error );
- return;
+ if (trackedErrors->isTracking(error->getErrCode())) {
+ trackedErrors->addError(new ShardError(endpoint, *error));
}
+ }
+}
- dassert( response.getOk() );
-
- // Stop tracking targeted batch
- _targeted.erase( &targetedBatch );
+static void incBatchStats(BatchedCommandRequest::BatchType batchType,
+ const BatchedCommandResponse& response,
+ BatchWriteStats* stats) {
+ if (batchType == BatchedCommandRequest::BatchType_Insert) {
+ stats->numInserted += response.getN();
+ } else if (batchType == BatchedCommandRequest::BatchType_Update) {
+ int numUpserted = 0;
+ if (response.isUpsertDetailsSet()) {
+ numUpserted = response.sizeUpsertDetails();
+ }
+ stats->numMatched += (response.getN() - numUpserted);
+ long long numModified = response.getNModified();
+
+ if (numModified >= 0)
+ stats->numModified += numModified;
+ else
+ stats->numModified = -1; // sentinel used to indicate we omit the field downstream
+
+ stats->numUpserted += numUpserted;
+ } else {
+ dassert(batchType == BatchedCommandRequest::BatchType_Delete);
+ stats->numDeleted += response.getN();
+ }
+}
- // Increment stats for this batch
- incBatchStats( _clientRequest->getBatchType(), response, _stats.get() );
+void BatchWriteOp::noteBatchResponse(const TargetedWriteBatch& targetedBatch,
+ const BatchedCommandResponse& response,
+ TrackedErrors* trackedErrors) {
+ if (!response.getOk()) {
+ WriteErrorDetail error;
+ cloneCommandErrorTo(response, &error);
- //
- // Assign errors to particular items.
- // Write Concern errors are stored and handled later.
- //
+ // Treat command errors exactly like other failures of the batch
+ // Note that no errors will be tracked from these failures - as-designed
+ noteBatchError(targetedBatch, error);
+ return;
+ }
- // Special handling for write concern errors, save for later
- if ( response.isWriteConcernErrorSet() ) {
- unique_ptr<ShardWCError> wcError( new ShardWCError( targetedBatch.getEndpoint(),
- *response.getWriteConcernError() ));
- _wcErrors.mutableVector().push_back( wcError.release() );
- }
+ dassert(response.getOk());
- vector<WriteErrorDetail*> itemErrors;
+ // Stop tracking targeted batch
+ _targeted.erase(&targetedBatch);
- // Handle batch and per-item errors
- if ( response.isErrDetailsSet() ) {
+ // Increment stats for this batch
+ incBatchStats(_clientRequest->getBatchType(), response, _stats.get());
- // Per-item errors were set
- itemErrors.insert( itemErrors.begin(),
- response.getErrDetails().begin(),
- response.getErrDetails().end() );
+ //
+ // Assign errors to particular items.
+ // Write Concern errors are stored and handled later.
+ //
- // Sort per-item errors by index
- std::sort( itemErrors.begin(), itemErrors.end(), WriteErrorDetailComp() );
- }
+ // Special handling for write concern errors, save for later
+ if (response.isWriteConcernErrorSet()) {
+ unique_ptr<ShardWCError> wcError(
+ new ShardWCError(targetedBatch.getEndpoint(), *response.getWriteConcernError()));
+ _wcErrors.mutableVector().push_back(wcError.release());
+ }
- //
- // Go through all pending responses of the op and sorted remote reponses, populate errors
- // This will either set all errors to the batch error or apply per-item errors as-needed
- //
- // If the batch is ordered, cancel all writes after the first error for retargeting.
- //
+ vector<WriteErrorDetail*> itemErrors;
- bool ordered = _clientRequest->getOrdered();
+ // Handle batch and per-item errors
+ if (response.isErrDetailsSet()) {
+ // Per-item errors were set
+ itemErrors.insert(
+ itemErrors.begin(), response.getErrDetails().begin(), response.getErrDetails().end());
- vector<WriteErrorDetail*>::iterator itemErrorIt = itemErrors.begin();
- int index = 0;
- WriteErrorDetail* lastError = NULL;
- for ( vector<TargetedWrite*>::const_iterator it = targetedBatch.getWrites().begin();
- it != targetedBatch.getWrites().end(); ++it, ++index ) {
+ // Sort per-item errors by index
+ std::sort(itemErrors.begin(), itemErrors.end(), WriteErrorDetailComp());
+ }
- const TargetedWrite* write = *it;
- WriteOp& writeOp = _writeOps[write->writeOpRef.first];
+ //
+ // Go through all pending responses of the op and sorted remote reponses, populate errors
+ // This will either set all errors to the batch error or apply per-item errors as-needed
+ //
+ // If the batch is ordered, cancel all writes after the first error for retargeting.
+ //
- dassert( writeOp.getWriteState() == WriteOpState_Pending );
+ bool ordered = _clientRequest->getOrdered();
- // See if we have an error for the write
- WriteErrorDetail* writeError = NULL;
+ vector<WriteErrorDetail*>::iterator itemErrorIt = itemErrors.begin();
+ int index = 0;
+ WriteErrorDetail* lastError = NULL;
+ for (vector<TargetedWrite*>::const_iterator it = targetedBatch.getWrites().begin();
+ it != targetedBatch.getWrites().end();
+ ++it, ++index) {
+ const TargetedWrite* write = *it;
+ WriteOp& writeOp = _writeOps[write->writeOpRef.first];
- if ( itemErrorIt != itemErrors.end() && ( *itemErrorIt )->getIndex() == index ) {
- // We have an per-item error for this write op's index
- writeError = *itemErrorIt;
- ++itemErrorIt;
- }
+ dassert(writeOp.getWriteState() == WriteOpState_Pending);
- // Finish the response (with error, if needed)
- if ( NULL == writeError ) {
- if ( !ordered || !lastError ){
- writeOp.noteWriteComplete( *write );
- }
- else {
- // We didn't actually apply this write - cancel so we can retarget
- dassert( writeOp.getNumTargeted() == 1u );
- writeOp.cancelWrites( lastError );
- }
- }
- else {
- writeOp.noteWriteError( *write, *writeError );
- lastError = writeError;
- }
- }
+ // See if we have an error for the write
+ WriteErrorDetail* writeError = NULL;
- // Track errors we care about, whether batch or individual errors
- if ( NULL != trackedErrors ) {
- trackErrors( targetedBatch.getEndpoint(), itemErrors, trackedErrors );
+ if (itemErrorIt != itemErrors.end() && (*itemErrorIt)->getIndex() == index) {
+ // We have an per-item error for this write op's index
+ writeError = *itemErrorIt;
+ ++itemErrorIt;
}
- // Track upserted ids if we need to
- if ( response.isUpsertDetailsSet() ) {
-
- const vector<BatchedUpsertDetail*>& upsertedIds = response.getUpsertDetails();
- for ( vector<BatchedUpsertDetail*>::const_iterator it = upsertedIds.begin();
- it != upsertedIds.end(); ++it ) {
-
- // The child upserted details don't have the correct index for the full batch
- const BatchedUpsertDetail* childUpsertedId = *it;
-
- // Work backward from the child batch item index to the batch item index
- int childBatchIndex = childUpsertedId->getIndex();
- int batchIndex = targetedBatch.getWrites()[childBatchIndex]->writeOpRef.first;
-
- // Push the upserted id with the correct index into the batch upserted ids
- BatchedUpsertDetail* upsertedId = new BatchedUpsertDetail;
- upsertedId->setIndex( batchIndex );
- upsertedId->setUpsertedID( childUpsertedId->getUpsertedID() );
- _upsertedIds.mutableVector().push_back( upsertedId );
+ // Finish the response (with error, if needed)
+ if (NULL == writeError) {
+ if (!ordered || !lastError) {
+ writeOp.noteWriteComplete(*write);
+ } else {
+ // We didn't actually apply this write - cancel so we can retarget
+ dassert(writeOp.getNumTargeted() == 1u);
+ writeOp.cancelWrites(lastError);
}
+ } else {
+ writeOp.noteWriteError(*write, *writeError);
+ lastError = writeError;
}
}
- static void toWriteErrorResponse( const WriteErrorDetail& error,
- bool ordered,
- int numWrites,
- BatchedCommandResponse* writeErrResponse ) {
+ // Track errors we care about, whether batch or individual errors
+ if (NULL != trackedErrors) {
+ trackErrors(targetedBatch.getEndpoint(), itemErrors, trackedErrors);
+ }
+
+ // Track upserted ids if we need to
+ if (response.isUpsertDetailsSet()) {
+ const vector<BatchedUpsertDetail*>& upsertedIds = response.getUpsertDetails();
+ for (vector<BatchedUpsertDetail*>::const_iterator it = upsertedIds.begin();
+ it != upsertedIds.end();
+ ++it) {
+ // The child upserted details don't have the correct index for the full batch
+ const BatchedUpsertDetail* childUpsertedId = *it;
- writeErrResponse->setOk( true );
- writeErrResponse->setN( 0 );
+ // Work backward from the child batch item index to the batch item index
+ int childBatchIndex = childUpsertedId->getIndex();
+ int batchIndex = targetedBatch.getWrites()[childBatchIndex]->writeOpRef.first;
- int numErrors = ordered ? 1 : numWrites;
- for ( int i = 0; i < numErrors; i++ ) {
- unique_ptr<WriteErrorDetail> errorClone( new WriteErrorDetail );
- error.cloneTo( errorClone.get() );
- errorClone->setIndex( i );
- writeErrResponse->addToErrDetails( errorClone.release() );
+ // Push the upserted id with the correct index into the batch upserted ids
+ BatchedUpsertDetail* upsertedId = new BatchedUpsertDetail;
+ upsertedId->setIndex(batchIndex);
+ upsertedId->setUpsertedID(childUpsertedId->getUpsertedID());
+ _upsertedIds.mutableVector().push_back(upsertedId);
}
-
- dassert( writeErrResponse->isValid( NULL ) );
}
+}
- void BatchWriteOp::noteBatchError( const TargetedWriteBatch& targetedBatch,
- const WriteErrorDetail& error ) {
-
- // Treat errors to get a batch response as failures of the contained writes
- BatchedCommandResponse emulatedResponse;
- toWriteErrorResponse( error,
- _clientRequest->getOrdered(),
- targetedBatch.getWrites().size(),
- &emulatedResponse );
+static void toWriteErrorResponse(const WriteErrorDetail& error,
+ bool ordered,
+ int numWrites,
+ BatchedCommandResponse* writeErrResponse) {
+ writeErrResponse->setOk(true);
+ writeErrResponse->setN(0);
- noteBatchResponse( targetedBatch, emulatedResponse, NULL );
+ int numErrors = ordered ? 1 : numWrites;
+ for (int i = 0; i < numErrors; i++) {
+ unique_ptr<WriteErrorDetail> errorClone(new WriteErrorDetail);
+ error.cloneTo(errorClone.get());
+ errorClone->setIndex(i);
+ writeErrResponse->addToErrDetails(errorClone.release());
}
- void BatchWriteOp::abortBatch( const WriteErrorDetail& error ) {
+ dassert(writeErrResponse->isValid(NULL));
+}
- dassert( !isFinished() );
- dassert( numWriteOpsIn( WriteOpState_Pending ) == 0 );
+void BatchWriteOp::noteBatchError(const TargetedWriteBatch& targetedBatch,
+ const WriteErrorDetail& error) {
+ // Treat errors to get a batch response as failures of the contained writes
+ BatchedCommandResponse emulatedResponse;
+ toWriteErrorResponse(
+ error, _clientRequest->getOrdered(), targetedBatch.getWrites().size(), &emulatedResponse);
- size_t numWriteOps = _clientRequest->sizeWriteOps();
- bool orderedOps = _clientRequest->getOrdered();
- for ( size_t i = 0; i < numWriteOps; ++i ) {
+ noteBatchResponse(targetedBatch, emulatedResponse, NULL);
+}
- WriteOp& writeOp = _writeOps[i];
- // Can only be called with no outstanding batches
- dassert( writeOp.getWriteState() != WriteOpState_Pending );
+void BatchWriteOp::abortBatch(const WriteErrorDetail& error) {
+ dassert(!isFinished());
+ dassert(numWriteOpsIn(WriteOpState_Pending) == 0);
- if ( writeOp.getWriteState() < WriteOpState_Completed ) {
+ size_t numWriteOps = _clientRequest->sizeWriteOps();
+ bool orderedOps = _clientRequest->getOrdered();
+ for (size_t i = 0; i < numWriteOps; ++i) {
+ WriteOp& writeOp = _writeOps[i];
+ // Can only be called with no outstanding batches
+ dassert(writeOp.getWriteState() != WriteOpState_Pending);
- writeOp.setOpError( error );
+ if (writeOp.getWriteState() < WriteOpState_Completed) {
+ writeOp.setOpError(error);
- // Only one error if we're ordered
- if ( orderedOps ) break;
- }
+ // Only one error if we're ordered
+ if (orderedOps)
+ break;
}
-
- dassert( isFinished() );
}
- bool BatchWriteOp::isFinished() {
-
- size_t numWriteOps = _clientRequest->sizeWriteOps();
- bool orderedOps = _clientRequest->getOrdered();
- for ( size_t i = 0; i < numWriteOps; ++i ) {
- WriteOp& writeOp = _writeOps[i];
- if ( writeOp.getWriteState() < WriteOpState_Completed ) return false;
- else if ( orderedOps && writeOp.getWriteState() == WriteOpState_Error ) return true;
- }
+ dassert(isFinished());
+}
- return true;
+bool BatchWriteOp::isFinished() {
+ size_t numWriteOps = _clientRequest->sizeWriteOps();
+ bool orderedOps = _clientRequest->getOrdered();
+ for (size_t i = 0; i < numWriteOps; ++i) {
+ WriteOp& writeOp = _writeOps[i];
+ if (writeOp.getWriteState() < WriteOpState_Completed)
+ return false;
+ else if (orderedOps && writeOp.getWriteState() == WriteOpState_Error)
+ return true;
}
- //
- // Aggregation functions for building the final response errors
- //
-
- void BatchWriteOp::buildClientResponse( BatchedCommandResponse* batchResp ) {
+ return true;
+}
- dassert( isFinished() );
+//
+// Aggregation functions for building the final response errors
+//
- // Result is OK
- batchResp->setOk( true );
+void BatchWriteOp::buildClientResponse(BatchedCommandResponse* batchResp) {
+ dassert(isFinished());
- // For non-verbose, it's all we need.
- if ( !_clientRequest->isVerboseWC() ) {
- dassert( batchResp->isValid( NULL ) );
- return;
- }
+ // Result is OK
+ batchResp->setOk(true);
- //
- // Find all the errors in the batch
- //
+ // For non-verbose, it's all we need.
+ if (!_clientRequest->isVerboseWC()) {
+ dassert(batchResp->isValid(NULL));
+ return;
+ }
- vector<WriteOp*> errOps;
+ //
+ // Find all the errors in the batch
+ //
- size_t numWriteOps = _clientRequest->sizeWriteOps();
- for ( size_t i = 0; i < numWriteOps; ++i ) {
+ vector<WriteOp*> errOps;
- WriteOp& writeOp = _writeOps[i];
+ size_t numWriteOps = _clientRequest->sizeWriteOps();
+ for (size_t i = 0; i < numWriteOps; ++i) {
+ WriteOp& writeOp = _writeOps[i];
- if ( writeOp.getWriteState() == WriteOpState_Error ) {
- errOps.push_back( &writeOp );
- }
+ if (writeOp.getWriteState() == WriteOpState_Error) {
+ errOps.push_back(&writeOp);
}
+ }
- //
- // Build the per-item errors.
- //
-
- if ( !errOps.empty() ) {
- for ( vector<WriteOp*>::iterator it = errOps.begin(); it != errOps.end(); ++it ) {
- WriteOp& writeOp = **it;
- WriteErrorDetail* error = new WriteErrorDetail();
- writeOp.getOpError().cloneTo( error );
- batchResp->addToErrDetails( error );
- }
- }
-
- // Only return a write concern error if everything succeeded (unordered or ordered)
- // OR if something succeeded and we're unordered
- bool reportWCError = errOps.empty()
- || ( !_clientRequest->getOrdered()
- && errOps.size() < _clientRequest->sizeWriteOps() );
- if ( !_wcErrors.empty() && reportWCError ) {
-
- WCErrorDetail* error = new WCErrorDetail;
-
- // Generate the multi-error message below
- stringstream msg;
- if ( _wcErrors.size() > 1 ) {
- msg << "multiple errors reported : ";
- error->setErrCode( ErrorCodes::WriteConcernFailed );
- }
- else {
- error->setErrCode( ( *_wcErrors.begin() )->error.getErrCode() );
- }
-
- for ( vector<ShardWCError*>::const_iterator it = _wcErrors.begin();
- it != _wcErrors.end(); ++it ) {
- const ShardWCError* wcError = *it;
- if ( it != _wcErrors.begin() )
- msg << " :: and :: ";
- msg << wcError->error.getErrMessage() << " at " << wcError->endpoint.shardName;
- }
+ //
+ // Build the per-item errors.
+ //
- error->setErrMessage( msg.str() );
- batchResp->setWriteConcernError( error );
+ if (!errOps.empty()) {
+ for (vector<WriteOp*>::iterator it = errOps.begin(); it != errOps.end(); ++it) {
+ WriteOp& writeOp = **it;
+ WriteErrorDetail* error = new WriteErrorDetail();
+ writeOp.getOpError().cloneTo(error);
+ batchResp->addToErrDetails(error);
}
+ }
- //
- // Append the upserted ids, if required
- //
+ // Only return a write concern error if everything succeeded (unordered or ordered)
+ // OR if something succeeded and we're unordered
+ bool reportWCError = errOps.empty() ||
+ (!_clientRequest->getOrdered() && errOps.size() < _clientRequest->sizeWriteOps());
+ if (!_wcErrors.empty() && reportWCError) {
+ WCErrorDetail* error = new WCErrorDetail;
- if ( _upsertedIds.size() != 0 ) {
- batchResp->setUpsertDetails( _upsertedIds.vector() );
+ // Generate the multi-error message below
+ stringstream msg;
+ if (_wcErrors.size() > 1) {
+ msg << "multiple errors reported : ";
+ error->setErrCode(ErrorCodes::WriteConcernFailed);
+ } else {
+ error->setErrCode((*_wcErrors.begin())->error.getErrCode());
}
- // Stats
- int nValue = _stats->numInserted + _stats->numUpserted + _stats->numMatched
- + _stats->numDeleted;
- batchResp->setN( nValue );
- if ( _clientRequest->getBatchType() == BatchedCommandRequest::BatchType_Update &&
- _stats->numModified >= 0) {
- batchResp->setNModified( _stats->numModified );
+ for (vector<ShardWCError*>::const_iterator it = _wcErrors.begin(); it != _wcErrors.end();
+ ++it) {
+ const ShardWCError* wcError = *it;
+ if (it != _wcErrors.begin())
+ msg << " :: and :: ";
+ msg << wcError->error.getErrMessage() << " at " << wcError->endpoint.shardName;
}
- dassert( batchResp->isValid( NULL ) );
+ error->setErrMessage(msg.str());
+ batchResp->setWriteConcernError(error);
}
- BatchWriteOp::~BatchWriteOp() {
- // Caller's responsibility to dispose of TargetedBatches
- dassert( _targeted.empty() );
-
- if ( NULL != _writeOps ) {
-
- size_t numWriteOps = _clientRequest->sizeWriteOps();
- for ( size_t i = 0; i < numWriteOps; ++i ) {
- // Placement new so manual destruct
- _writeOps[i].~WriteOp();
- }
+ //
+ // Append the upserted ids, if required
+ //
- ::operator delete[]( _writeOps );
- _writeOps = NULL;
- }
+ if (_upsertedIds.size() != 0) {
+ batchResp->setUpsertDetails(_upsertedIds.vector());
}
- int BatchWriteOp::numWriteOps() const {
- return static_cast<int>( _clientRequest->sizeWriteOps() );
+ // Stats
+ int nValue =
+ _stats->numInserted + _stats->numUpserted + _stats->numMatched + _stats->numDeleted;
+ batchResp->setN(nValue);
+ if (_clientRequest->getBatchType() == BatchedCommandRequest::BatchType_Update &&
+ _stats->numModified >= 0) {
+ batchResp->setNModified(_stats->numModified);
}
- int BatchWriteOp::numWriteOpsIn( WriteOpState opState ) const {
+ dassert(batchResp->isValid(NULL));
+}
+
+BatchWriteOp::~BatchWriteOp() {
+ // Caller's responsibility to dispose of TargetedBatches
+ dassert(_targeted.empty());
- // TODO: This could be faster, if we tracked this info explicitly
+ if (NULL != _writeOps) {
size_t numWriteOps = _clientRequest->sizeWriteOps();
- int count = 0;
- for ( size_t i = 0; i < numWriteOps; ++i ) {
- WriteOp& writeOp = _writeOps[i];
- if ( writeOp.getWriteState() == opState )
- ++count;
+ for (size_t i = 0; i < numWriteOps; ++i) {
+ // Placement new so manual destruct
+ _writeOps[i].~WriteOp();
}
- return count;
+ ::operator delete[](_writeOps);
+ _writeOps = NULL;
}
+}
- void TrackedErrors::startTracking( int errCode ) {
- dassert( !isTracking( errCode ) );
- _errorMap.insert( make_pair( errCode, vector<ShardError*>() ) );
- }
+int BatchWriteOp::numWriteOps() const {
+ return static_cast<int>(_clientRequest->sizeWriteOps());
+}
- bool TrackedErrors::isTracking( int errCode ) const {
- return _errorMap.find( errCode ) != _errorMap.end();
+int BatchWriteOp::numWriteOpsIn(WriteOpState opState) const {
+ // TODO: This could be faster, if we tracked this info explicitly
+ size_t numWriteOps = _clientRequest->sizeWriteOps();
+ int count = 0;
+ for (size_t i = 0; i < numWriteOps; ++i) {
+ WriteOp& writeOp = _writeOps[i];
+ if (writeOp.getWriteState() == opState)
+ ++count;
}
- void TrackedErrors::addError( ShardError* error ) {
- TrackedErrorMap::iterator seenIt = _errorMap.find( error->error.getErrCode() );
- if ( seenIt == _errorMap.end() ) return;
- seenIt->second.push_back( error );
- }
+ return count;
+}
- const vector<ShardError*>& TrackedErrors::getErrors( int errCode ) const {
- dassert( isTracking( errCode ) );
- return _errorMap.find( errCode )->second;
- }
+void TrackedErrors::startTracking(int errCode) {
+ dassert(!isTracking(errCode));
+ _errorMap.insert(make_pair(errCode, vector<ShardError*>()));
+}
- void TrackedErrors::clear() {
- for ( TrackedErrorMap::iterator it = _errorMap.begin(); it != _errorMap.end(); ++it ) {
+bool TrackedErrors::isTracking(int errCode) const {
+ return _errorMap.find(errCode) != _errorMap.end();
+}
- vector<ShardError*>& errors = it->second;
+void TrackedErrors::addError(ShardError* error) {
+ TrackedErrorMap::iterator seenIt = _errorMap.find(error->error.getErrCode());
+ if (seenIt == _errorMap.end())
+ return;
+ seenIt->second.push_back(error);
+}
- for ( vector<ShardError*>::iterator errIt = errors.begin(); errIt != errors.end();
- ++errIt ) {
- delete *errIt;
- }
- errors.clear();
- }
- }
+const vector<ShardError*>& TrackedErrors::getErrors(int errCode) const {
+ dassert(isTracking(errCode));
+ return _errorMap.find(errCode)->second;
+}
+
+void TrackedErrors::clear() {
+ for (TrackedErrorMap::iterator it = _errorMap.begin(); it != _errorMap.end(); ++it) {
+ vector<ShardError*>& errors = it->second;
- TrackedErrors::~TrackedErrors() {
- clear();
+ for (vector<ShardError*>::iterator errIt = errors.begin(); errIt != errors.end(); ++errIt) {
+ delete *errIt;
+ }
+ errors.clear();
}
+}
+TrackedErrors::~TrackedErrors() {
+ clear();
+}
}
diff --git a/src/mongo/s/write_ops/batch_write_op.h b/src/mongo/s/write_ops/batch_write_op.h
index 224d9985ef3..0add9339268 100644
--- a/src/mongo/s/write_ops/batch_write_op.h
+++ b/src/mongo/s/write_ops/batch_write_op.h
@@ -44,260 +44,244 @@
namespace mongo {
- class TargetedWriteBatch;
- struct ShardError;
- struct ShardWCError;
- class TrackedErrors;
- struct BatchWriteStats;
+class TargetedWriteBatch;
+struct ShardError;
+struct ShardWCError;
+class TrackedErrors;
+struct BatchWriteStats;
+
+/**
+ * The BatchWriteOp class manages the lifecycle of a batched write received by mongos. Each
+ * item in a batch is tracked via a WriteOp, and the function of the BatchWriteOp is to
+ * aggregate the dispatched requests and responses for the underlying WriteOps.
+ *
+ * Overall, the BatchWriteOp lifecycle is similar to the WriteOp lifecycle, with the following
+ * stages:
+ *
+ * 0) Client request comes in, batch write op is initialized
+ *
+ * 1a) One or more ops in the batch are targeted using targetBatch, resulting in
+ * TargetedWriteBatches for these ops.
+ * 1b) There are targeting errors, and the batch must be retargeted after refreshing the
+ * NSTargeter.
+ *
+ * 2) (Child BatchCommandRequests are be built for each TargetedWriteBatch before sending)
+ *
+ * 3) Responses for sent TargetedWriteBatches are noted, errors are stored and aggregated per-
+ * write-op. Errors the caller is interested in are returned.
+ *
+ * 4) If the batch write is not finished, goto 0
+ *
+ * 5) When all responses come back for all write ops, errors are aggregated and returned in
+ * a client response
+ *
+ */
+class BatchWriteOp {
+ MONGO_DISALLOW_COPYING(BatchWriteOp);
+
+public:
+ BatchWriteOp();
+
+ ~BatchWriteOp();
/**
- * The BatchWriteOp class manages the lifecycle of a batched write received by mongos. Each
- * item in a batch is tracked via a WriteOp, and the function of the BatchWriteOp is to
- * aggregate the dispatched requests and responses for the underlying WriteOps.
- *
- * Overall, the BatchWriteOp lifecycle is similar to the WriteOp lifecycle, with the following
- * stages:
- *
- * 0) Client request comes in, batch write op is initialized
- *
- * 1a) One or more ops in the batch are targeted using targetBatch, resulting in
- * TargetedWriteBatches for these ops.
- * 1b) There are targeting errors, and the batch must be retargeted after refreshing the
- * NSTargeter.
+ * Initializes the BatchWriteOp from a client batch request.
+ */
+ void initClientRequest(const BatchedCommandRequest* clientRequest);
+
+ /**
+ * Targets one or more of the next write ops in this batch op using a NSTargeter. The
+ * resulting TargetedWrites are aggregated together in the returned TargetedWriteBatches.
*
- * 2) (Child BatchCommandRequests are be built for each TargetedWriteBatch before sending)
+ * If 'recordTargetErrors' is false, any targeting error will abort all current batches and
+ * the method will return the targeting error. No targetedBatches will be returned on
+ * error.
*
- * 3) Responses for sent TargetedWriteBatches are noted, errors are stored and aggregated per-
- * write-op. Errors the caller is interested in are returned.
+ * Otherwise, if 'recordTargetErrors' is true, targeting errors will be recorded for each
+ * write op that fails to target, and the method will return OK.
*
- * 4) If the batch write is not finished, goto 0
+ * (The idea here is that if we are sure our NSTargeter is up-to-date we should record
+ * targeting errors, but if not we should refresh once first.)
*
- * 5) When all responses come back for all write ops, errors are aggregated and returned in
- * a client response
+ * Returned TargetedWriteBatches are owned by the caller.
+ */
+ Status targetBatch(const NSTargeter& targeter,
+ bool recordTargetErrors,
+ std::vector<TargetedWriteBatch*>* targetedBatches);
+
+ /**
+ * Fills a BatchCommandRequest from a TargetedWriteBatch for this BatchWriteOp.
+ */
+ void buildBatchRequest(const TargetedWriteBatch& targetedBatch,
+ BatchedCommandRequest* request) const;
+
+ /**
+ * Stores a response from one of the outstanding TargetedWriteBatches for this BatchWriteOp.
+ * The response may be in any form, error or not.
*
+ * There is an additional optional 'trackedErrors' parameter, which can be used to return
+ * copies of any write errors in the response that the caller is interested in (specified by
+ * errCode). (This avoids external callers needing to know much about the response format.)
*/
- class BatchWriteOp {
- MONGO_DISALLOW_COPYING(BatchWriteOp);
- public:
-
- BatchWriteOp();
-
- ~BatchWriteOp();
-
- /**
- * Initializes the BatchWriteOp from a client batch request.
- */
- void initClientRequest( const BatchedCommandRequest* clientRequest );
-
- /**
- * Targets one or more of the next write ops in this batch op using a NSTargeter. The
- * resulting TargetedWrites are aggregated together in the returned TargetedWriteBatches.
- *
- * If 'recordTargetErrors' is false, any targeting error will abort all current batches and
- * the method will return the targeting error. No targetedBatches will be returned on
- * error.
- *
- * Otherwise, if 'recordTargetErrors' is true, targeting errors will be recorded for each
- * write op that fails to target, and the method will return OK.
- *
- * (The idea here is that if we are sure our NSTargeter is up-to-date we should record
- * targeting errors, but if not we should refresh once first.)
- *
- * Returned TargetedWriteBatches are owned by the caller.
- */
- Status targetBatch( const NSTargeter& targeter,
- bool recordTargetErrors,
- std::vector<TargetedWriteBatch*>* targetedBatches );
-
- /**
- * Fills a BatchCommandRequest from a TargetedWriteBatch for this BatchWriteOp.
- */
- void buildBatchRequest( const TargetedWriteBatch& targetedBatch,
- BatchedCommandRequest* request ) const;
-
- /**
- * Stores a response from one of the outstanding TargetedWriteBatches for this BatchWriteOp.
- * The response may be in any form, error or not.
- *
- * There is an additional optional 'trackedErrors' parameter, which can be used to return
- * copies of any write errors in the response that the caller is interested in (specified by
- * errCode). (This avoids external callers needing to know much about the response format.)
- */
- void noteBatchResponse( const TargetedWriteBatch& targetedBatch,
- const BatchedCommandResponse& response,
- TrackedErrors* trackedErrors );
-
- /**
- * Stores an error that occurred trying to send/recv a TargetedWriteBatch for this
- * BatchWriteOp.
- */
- void noteBatchError( const TargetedWriteBatch& targetedBatch,
- const WriteErrorDetail& error );
-
- /**
- * Aborts any further writes in the batch with the provided error. There must be no pending
- * ops awaiting results when a batch is aborted.
- *
- * Batch is finished immediately after aborting.
- */
- void abortBatch( const WriteErrorDetail& error );
-
- /**
- * Returns false if the batch write op needs more processing.
- */
- bool isFinished();
-
- /**
- * Fills a batch response to send back to the client.
- */
- void buildClientResponse( BatchedCommandResponse* batchResp );
-
- //
- // Accessors
- //
-
- int numWriteOps() const;
-
- int numWriteOpsIn( WriteOpState state ) const;
-
- private:
-
- // Incoming client request, not owned here
- const BatchedCommandRequest* _clientRequest;
-
- // Array of ops being processed from the client request
- WriteOp* _writeOps;
-
- // Current outstanding batch op write requests
- // Not owned here but tracked for reporting
- std::set<const TargetedWriteBatch*> _targeted;
-
- // Write concern responses from all write batches so far
- OwnedPointerVector<ShardWCError> _wcErrors;
-
- // Upserted ids for the whole write batch
- OwnedPointerVector<BatchedUpsertDetail> _upsertedIds;
-
- // Stats for the entire batch op
- std::unique_ptr<BatchWriteStats> _stats;
- };
-
- struct BatchWriteStats {
-
- BatchWriteStats();
-
- int numInserted;
- int numUpserted;
- int numMatched;
- int numModified;
- int numDeleted;
-
- std::string toString() const {
- StringBuilder str;
- str << "numInserted: " << numInserted
- << " numUpserted: " << numUpserted
- << " numMatched: " << numMatched
- << " numModified: " << numModified
- << " numDeleted: " << numDeleted;
- return str.str();
- }
-
- };
+ void noteBatchResponse(const TargetedWriteBatch& targetedBatch,
+ const BatchedCommandResponse& response,
+ TrackedErrors* trackedErrors);
/**
- * Data structure representing the information needed to make a batch request, along with
- * pointers to where the resulting responses should be placed.
+ * Stores an error that occurred trying to send/recv a TargetedWriteBatch for this
+ * BatchWriteOp.
+ */
+ void noteBatchError(const TargetedWriteBatch& targetedBatch, const WriteErrorDetail& error);
+
+ /**
+ * Aborts any further writes in the batch with the provided error. There must be no pending
+ * ops awaiting results when a batch is aborted.
*
- * Internal support for storage as a doubly-linked list, to allow the TargetedWriteBatch to
- * efficiently be registered for reporting.
+ * Batch is finished immediately after aborting.
*/
- class TargetedWriteBatch {
- MONGO_DISALLOW_COPYING(TargetedWriteBatch);
- public:
+ void abortBatch(const WriteErrorDetail& error);
- TargetedWriteBatch( const ShardEndpoint& endpoint ) :
- _endpoint( endpoint ) {
- }
+ /**
+ * Returns false if the batch write op needs more processing.
+ */
+ bool isFinished();
- const ShardEndpoint& getEndpoint() const {
- return _endpoint;
- }
+ /**
+ * Fills a batch response to send back to the client.
+ */
+ void buildClientResponse(BatchedCommandResponse* batchResp);
- /**
- * TargetedWrite is owned here once given to the TargetedWriteBatch
- */
- void addWrite( TargetedWrite* targetedWrite ) {
- _writes.mutableVector().push_back( targetedWrite );
- }
+ //
+ // Accessors
+ //
- const std::vector<TargetedWrite*>& getWrites() const {
- return _writes.vector();
- }
+ int numWriteOps() const;
- private:
+ int numWriteOpsIn(WriteOpState state) const;
- // Where to send the batch
- const ShardEndpoint _endpoint;
+private:
+ // Incoming client request, not owned here
+ const BatchedCommandRequest* _clientRequest;
- // Where the responses go
- // TargetedWrite*s are owned by the TargetedWriteBatch
- OwnedPointerVector<TargetedWrite> _writes;
- };
+ // Array of ops being processed from the client request
+ WriteOp* _writeOps;
- /**
- * Simple struct for storing an error with an endpoint.
- *
- * Certain types of errors are not stored in WriteOps or must be returned to a caller.
- */
- struct ShardError {
+ // Current outstanding batch op write requests
+ // Not owned here but tracked for reporting
+ std::set<const TargetedWriteBatch*> _targeted;
- ShardError( const ShardEndpoint& endpoint, const WriteErrorDetail& error ) :
- endpoint( endpoint ) {
- error.cloneTo( &this->error );
- }
+ // Write concern responses from all write batches so far
+ OwnedPointerVector<ShardWCError> _wcErrors;
- const ShardEndpoint endpoint;
- WriteErrorDetail error;
- };
+ // Upserted ids for the whole write batch
+ OwnedPointerVector<BatchedUpsertDetail> _upsertedIds;
- /**
- * Simple struct for storing a write concern error with an endpoint.
- *
- * Certain types of errors are not stored in WriteOps or must be returned to a caller.
- */
- struct ShardWCError {
+ // Stats for the entire batch op
+ std::unique_ptr<BatchWriteStats> _stats;
+};
+
+struct BatchWriteStats {
+ BatchWriteStats();
+
+ int numInserted;
+ int numUpserted;
+ int numMatched;
+ int numModified;
+ int numDeleted;
- ShardWCError( const ShardEndpoint& endpoint, const WCErrorDetail& error ) :
- endpoint( endpoint ) {
- error.cloneTo( &this->error );
- }
+ std::string toString() const {
+ StringBuilder str;
+ str << "numInserted: " << numInserted << " numUpserted: " << numUpserted
+ << " numMatched: " << numMatched << " numModified: " << numModified
+ << " numDeleted: " << numDeleted;
+ return str.str();
+ }
+};
- const ShardEndpoint endpoint;
- WCErrorDetail error;
- };
+/**
+ * Data structure representing the information needed to make a batch request, along with
+ * pointers to where the resulting responses should be placed.
+ *
+ * Internal support for storage as a doubly-linked list, to allow the TargetedWriteBatch to
+ * efficiently be registered for reporting.
+ */
+class TargetedWriteBatch {
+ MONGO_DISALLOW_COPYING(TargetedWriteBatch);
+
+public:
+ TargetedWriteBatch(const ShardEndpoint& endpoint) : _endpoint(endpoint) {}
+
+ const ShardEndpoint& getEndpoint() const {
+ return _endpoint;
+ }
/**
- * Helper class for tracking certain errors from batch operations
+ * TargetedWrite is owned here once given to the TargetedWriteBatch
*/
- class TrackedErrors {
- public:
+ void addWrite(TargetedWrite* targetedWrite) {
+ _writes.mutableVector().push_back(targetedWrite);
+ }
+
+ const std::vector<TargetedWrite*>& getWrites() const {
+ return _writes.vector();
+ }
+
+private:
+ // Where to send the batch
+ const ShardEndpoint _endpoint;
- ~TrackedErrors();
+ // Where the responses go
+ // TargetedWrite*s are owned by the TargetedWriteBatch
+ OwnedPointerVector<TargetedWrite> _writes;
+};
+
+/**
+ * Simple struct for storing an error with an endpoint.
+ *
+ * Certain types of errors are not stored in WriteOps or must be returned to a caller.
+ */
+struct ShardError {
+ ShardError(const ShardEndpoint& endpoint, const WriteErrorDetail& error) : endpoint(endpoint) {
+ error.cloneTo(&this->error);
+ }
- void startTracking( int errCode );
+ const ShardEndpoint endpoint;
+ WriteErrorDetail error;
+};
- bool isTracking( int errCode ) const;
+/**
+ * Simple struct for storing a write concern error with an endpoint.
+ *
+ * Certain types of errors are not stored in WriteOps or must be returned to a caller.
+ */
+struct ShardWCError {
+ ShardWCError(const ShardEndpoint& endpoint, const WCErrorDetail& error) : endpoint(endpoint) {
+ error.cloneTo(&this->error);
+ }
+
+ const ShardEndpoint endpoint;
+ WCErrorDetail error;
+};
+
+/**
+ * Helper class for tracking certain errors from batch operations
+ */
+class TrackedErrors {
+public:
+ ~TrackedErrors();
- void addError( ShardError* error );
+ void startTracking(int errCode);
- const std::vector<ShardError*>& getErrors( int errCode ) const;
+ bool isTracking(int errCode) const;
- void clear();
+ void addError(ShardError* error);
- private:
+ const std::vector<ShardError*>& getErrors(int errCode) const;
- typedef unordered_map<int, std::vector<ShardError*> > TrackedErrorMap;
- TrackedErrorMap _errorMap;
- };
+ void clear();
+private:
+ typedef unordered_map<int, std::vector<ShardError*>> TrackedErrorMap;
+ TrackedErrorMap _errorMap;
+};
}
diff --git a/src/mongo/s/write_ops/batch_write_op_test.cpp b/src/mongo/s/write_ops/batch_write_op_test.cpp
index 6a00115531b..bb0e923cfb1 100644
--- a/src/mongo/s/write_ops/batch_write_op_test.cpp
+++ b/src/mongo/s/write_ops/batch_write_op_test.cpp
@@ -37,1801 +37,1747 @@
namespace {
- using std::unique_ptr;
- using std::string;
- using std::vector;
-
- using namespace mongo;
-
- static void initTargeterFullRange( const NamespaceString& nss,
- const ShardEndpoint& endpoint,
- MockNSTargeter* targeter ) {
-
- vector<MockRange*> mockRanges;
- mockRanges.push_back( new MockRange( endpoint,
- nss,
- BSON( "x" << MINKEY ),
- BSON( "x" << MAXKEY ) ) );
- targeter->init( mockRanges );
- }
-
- static void initTargeterSplitRange( const NamespaceString& nss,
- const ShardEndpoint& endpointA,
- const ShardEndpoint& endpointB,
- MockNSTargeter* targeter ) {
-
- vector<MockRange*> mockRanges;
- mockRanges.push_back( new MockRange( endpointA,
- nss,
- BSON( "x" << MINKEY ),
- BSON( "x" << 0 ) ) );
- mockRanges.push_back( new MockRange( endpointB,
- nss,
- BSON( "x" << 0 ),
- BSON( "x" << MAXKEY ) ) );
- targeter->init( mockRanges );
- }
-
- static void initTargeterHalfRange( const NamespaceString& nss,
- const ShardEndpoint& endpoint,
- MockNSTargeter* targeter ) {
-
- vector<MockRange*> mockRanges;
- mockRanges.push_back( new MockRange( endpoint,
- nss,
- BSON( "x" << MINKEY ),
- BSON( "x" << 0 ) ) );
-
- // x >= 0 values untargetable
-
- targeter->init( mockRanges );
- }
-
- static BatchedDeleteDocument* buildDelete( const BSONObj& query, int limit ) {
- BatchedDeleteDocument* deleteDoc = new BatchedDeleteDocument;
- deleteDoc->setQuery( query );
- deleteDoc->setLimit( limit );
- return deleteDoc;
- }
+using std::unique_ptr;
+using std::string;
+using std::vector;
+
+using namespace mongo;
+
+static void initTargeterFullRange(const NamespaceString& nss,
+ const ShardEndpoint& endpoint,
+ MockNSTargeter* targeter) {
+ vector<MockRange*> mockRanges;
+ mockRanges.push_back(new MockRange(endpoint, nss, BSON("x" << MINKEY), BSON("x" << MAXKEY)));
+ targeter->init(mockRanges);
+}
+
+static void initTargeterSplitRange(const NamespaceString& nss,
+ const ShardEndpoint& endpointA,
+ const ShardEndpoint& endpointB,
+ MockNSTargeter* targeter) {
+ vector<MockRange*> mockRanges;
+ mockRanges.push_back(new MockRange(endpointA, nss, BSON("x" << MINKEY), BSON("x" << 0)));
+ mockRanges.push_back(new MockRange(endpointB, nss, BSON("x" << 0), BSON("x" << MAXKEY)));
+ targeter->init(mockRanges);
+}
+
+static void initTargeterHalfRange(const NamespaceString& nss,
+ const ShardEndpoint& endpoint,
+ MockNSTargeter* targeter) {
+ vector<MockRange*> mockRanges;
+ mockRanges.push_back(new MockRange(endpoint, nss, BSON("x" << MINKEY), BSON("x" << 0)));
+
+ // x >= 0 values untargetable
+
+ targeter->init(mockRanges);
+}
+
+static BatchedDeleteDocument* buildDelete(const BSONObj& query, int limit) {
+ BatchedDeleteDocument* deleteDoc = new BatchedDeleteDocument;
+ deleteDoc->setQuery(query);
+ deleteDoc->setLimit(limit);
+ return deleteDoc;
+}
+
+static BatchedUpdateDocument* buildUpdate(const BSONObj& query, bool multi) {
+ BatchedUpdateDocument* updateDoc = new BatchedUpdateDocument;
+ updateDoc->setUpdateExpr(BSONObj());
+ updateDoc->setQuery(query);
+ updateDoc->setMulti(multi);
+ return updateDoc;
+}
+
+static BatchedUpdateDocument* buildUpdate(const BSONObj& query,
+ const BSONObj& updateExpr,
+ bool multi) {
+ BatchedUpdateDocument* updateDoc = new BatchedUpdateDocument;
+ updateDoc->setQuery(query);
+ updateDoc->setUpdateExpr(updateExpr);
+ updateDoc->setMulti(multi);
+ return updateDoc;
+}
+
+static void buildResponse(int n, BatchedCommandResponse* response) {
+ response->clear();
+ response->setOk(true);
+ response->setN(n);
+ ASSERT(response->isValid(NULL));
+}
+
+static void buildErrResponse(int code, const string& message, BatchedCommandResponse* response) {
+ response->clear();
+ response->setOk(false);
+ response->setN(0);
+ response->setErrCode(code);
+ response->setErrMessage(message);
+ ASSERT(response->isValid(NULL));
+}
+
+static void addError(int code, const string& message, int index, BatchedCommandResponse* response) {
+ unique_ptr<WriteErrorDetail> error(new WriteErrorDetail);
+ error->setErrCode(code);
+ error->setErrMessage(message);
+ error->setIndex(index);
+
+ response->addToErrDetails(error.release());
+}
+
+static void addWCError(BatchedCommandResponse* response) {
+ unique_ptr<WCErrorDetail> error(new WCErrorDetail);
+ error->setErrCode(ErrorCodes::WriteConcernFailed);
+ error->setErrMessage("mock wc error");
+
+ response->setWriteConcernError(error.release());
+}
+
+TEST(WriteOpTests, SingleOp) {
+ //
+ // Single-op targeting test
+ //
- static BatchedUpdateDocument* buildUpdate( const BSONObj& query, bool multi ) {
- BatchedUpdateDocument* updateDoc = new BatchedUpdateDocument;
- updateDoc->setUpdateExpr( BSONObj() );
- updateDoc->setQuery( query );
- updateDoc->setMulti( multi );
- return updateDoc;
- }
+ NamespaceString nss("foo.bar");
+ ShardEndpoint endpoint("shard", ChunkVersion::IGNORED());
+ MockNSTargeter targeter;
+ initTargeterFullRange(nss, endpoint, &targeter);
- static BatchedUpdateDocument* buildUpdate(const BSONObj& query,
- const BSONObj& updateExpr,
- bool multi) {
- BatchedUpdateDocument* updateDoc = new BatchedUpdateDocument;
- updateDoc->setQuery( query );
- updateDoc->setUpdateExpr( updateExpr );
- updateDoc->setMulti( multi );
- return updateDoc;
- }
+ // Do single-target, single doc batch write op
+ BatchedCommandRequest request(BatchedCommandRequest::BatchType_Insert);
+ request.setNS(nss.ns());
+ request.getInsertRequest()->addToDocuments(BSON("x" << 1));
- static void buildResponse( int n, BatchedCommandResponse* response ) {
- response->clear();
- response->setOk( true );
- response->setN( n );
- ASSERT( response->isValid( NULL ) );
- }
+ BatchWriteOp batchOp;
+ batchOp.initClientRequest(&request);
+ ASSERT(!batchOp.isFinished());
- static void buildErrResponse( int code,
- const string& message,
- BatchedCommandResponse* response ) {
- response->clear();
- response->setOk( false );
- response->setN( 0 );
- response->setErrCode( code );
- response->setErrMessage( message );
- ASSERT( response->isValid( NULL ) );
- }
+ OwnedPointerVector<TargetedWriteBatch> targetedOwned;
+ vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
+ Status status = batchOp.targetBatch(targeter, false, &targeted);
- static void addError( int code,
- const string& message,
- int index,
- BatchedCommandResponse* response ) {
+ ASSERT(status.isOK());
+ ASSERT(!batchOp.isFinished());
+ ASSERT_EQUALS(targeted.size(), 1u);
+ assertEndpointsEqual(targeted.front()->getEndpoint(), endpoint);
- unique_ptr<WriteErrorDetail> error( new WriteErrorDetail );
- error->setErrCode( code );
- error->setErrMessage( message );
- error->setIndex( index );
+ BatchedCommandResponse response;
+ buildResponse(1, &response);
- response->addToErrDetails( error.release() );
- }
+ batchOp.noteBatchResponse(*targeted.front(), response, NULL);
+ ASSERT(batchOp.isFinished());
- static void addWCError( BatchedCommandResponse* response ) {
+ BatchedCommandResponse clientResponse;
+ batchOp.buildClientResponse(&clientResponse);
+ ASSERT(clientResponse.getOk());
+}
- unique_ptr<WCErrorDetail> error( new WCErrorDetail );
- error->setErrCode( ErrorCodes::WriteConcernFailed );
- error->setErrMessage( "mock wc error" );
+TEST(WriteOpTests, SingleError) {
+ //
+ // Single-op error test
+ //
- response->setWriteConcernError( error.release() );
- }
+ NamespaceString nss("foo.bar");
+ ShardEndpoint endpoint("shard", ChunkVersion::IGNORED());
+ MockNSTargeter targeter;
+ initTargeterFullRange(nss, endpoint, &targeter);
- TEST(WriteOpTests, SingleOp) {
+ // Do single-target, single doc batch write op
+ BatchedCommandRequest request(BatchedCommandRequest::BatchType_Delete);
+ request.setNS(nss.ns());
+ request.getDeleteRequest()->addToDeletes(buildDelete(BSON("x" << 1), 1));
- //
- // Single-op targeting test
- //
+ BatchWriteOp batchOp;
+ batchOp.initClientRequest(&request);
+ ASSERT(!batchOp.isFinished());
- NamespaceString nss( "foo.bar" );
- ShardEndpoint endpoint( "shard", ChunkVersion::IGNORED() );
- MockNSTargeter targeter;
- initTargeterFullRange( nss, endpoint, &targeter );
+ OwnedPointerVector<TargetedWriteBatch> targetedOwned;
+ vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
+ Status status = batchOp.targetBatch(targeter, false, &targeted);
- // Do single-target, single doc batch write op
- BatchedCommandRequest request( BatchedCommandRequest::BatchType_Insert );
- request.setNS( nss.ns() );
- request.getInsertRequest()->addToDocuments( BSON( "x" << 1 ) );
+ ASSERT(status.isOK());
+ ASSERT(!batchOp.isFinished());
+ ASSERT_EQUALS(targeted.size(), 1u);
+ assertEndpointsEqual(targeted.front()->getEndpoint(), endpoint);
- BatchWriteOp batchOp;
- batchOp.initClientRequest( &request );
- ASSERT( !batchOp.isFinished() );
+ BatchedCommandResponse response;
+ buildErrResponse(ErrorCodes::UnknownError, "message", &response);
- OwnedPointerVector<TargetedWriteBatch> targetedOwned;
- vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch( targeter, false, &targeted );
+ batchOp.noteBatchResponse(*targeted.front(), response, NULL);
+ ASSERT(batchOp.isFinished());
- ASSERT( status.isOK() );
- ASSERT( !batchOp.isFinished() );
- ASSERT_EQUALS( targeted.size(), 1u );
- assertEndpointsEqual( targeted.front()->getEndpoint(), endpoint );
+ BatchedCommandResponse clientResponse;
+ batchOp.buildClientResponse(&clientResponse);
- BatchedCommandResponse response;
- buildResponse( 1, &response );
+ ASSERT(clientResponse.getOk());
+ ASSERT_EQUALS(clientResponse.sizeErrDetails(), 1u);
+ ASSERT_EQUALS(clientResponse.getErrDetailsAt(0)->getErrCode(), response.getErrCode());
+ ASSERT(clientResponse.getErrDetailsAt(0)->getErrMessage().find(response.getErrMessage()) !=
+ string::npos);
+ ASSERT_EQUALS(clientResponse.getN(), 0);
+}
- batchOp.noteBatchResponse( *targeted.front(), response, NULL );
- ASSERT( batchOp.isFinished() );
+TEST(WriteOpTests, SingleTargetError) {
+ //
+ // Single-op targeting error test
+ //
- BatchedCommandResponse clientResponse;
- batchOp.buildClientResponse( &clientResponse );
- ASSERT( clientResponse.getOk() );
+ NamespaceString nss("foo.bar");
+ ShardEndpoint endpoint("shard", ChunkVersion::IGNORED());
+ MockNSTargeter targeter;
+ initTargeterHalfRange(nss, endpoint, &targeter);
- }
+ // Do untargetable delete op
+ BatchedCommandRequest request(BatchedCommandRequest::BatchType_Delete);
+ request.setNS(nss.ns());
+ request.getDeleteRequest()->addToDeletes(buildDelete(BSON("x" << 1), 1));
- TEST(WriteOpTests, SingleError) {
+ BatchWriteOp batchOp;
+ batchOp.initClientRequest(&request);
+ ASSERT(!batchOp.isFinished());
- //
- // Single-op error test
- //
+ OwnedPointerVector<TargetedWriteBatch> targetedOwned;
+ vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
+ Status status = batchOp.targetBatch(targeter, false, &targeted);
- NamespaceString nss( "foo.bar" );
- ShardEndpoint endpoint( "shard", ChunkVersion::IGNORED() );
- MockNSTargeter targeter;
- initTargeterFullRange( nss, endpoint, &targeter );
+ ASSERT(!status.isOK());
+ ASSERT(!batchOp.isFinished());
+ ASSERT_EQUALS(targeted.size(), 0u);
- // Do single-target, single doc batch write op
- BatchedCommandRequest request( BatchedCommandRequest::BatchType_Delete );
- request.setNS( nss.ns() );
- request.getDeleteRequest()->addToDeletes( buildDelete( BSON( "x" << 1 ), 1 ) );
+ // Record targeting failures
+ status = batchOp.targetBatch(targeter, true, &targeted);
- BatchWriteOp batchOp;
- batchOp.initClientRequest( &request );
- ASSERT( !batchOp.isFinished() );
+ ASSERT(status.isOK());
+ ASSERT(batchOp.isFinished());
+ ASSERT_EQUALS(targeted.size(), 0u);
- OwnedPointerVector<TargetedWriteBatch> targetedOwned;
- vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch( targeter, false, &targeted );
+ BatchedCommandResponse clientResponse;
+ batchOp.buildClientResponse(&clientResponse);
+ ASSERT(clientResponse.getOk());
+ ASSERT_EQUALS(clientResponse.getN(), 0);
+ ASSERT_EQUALS(clientResponse.sizeErrDetails(), 1u);
+}
- ASSERT( status.isOK() );
- ASSERT( !batchOp.isFinished() );
- ASSERT_EQUALS( targeted.size(), 1u );
- assertEndpointsEqual( targeted.front()->getEndpoint(), endpoint );
+TEST(WriteOpTests, SingleWriteConcernErrorOrdered) {
+ //
+ // Write concern error test - we should pass write concern to sub-batches, and pass up the
+ // write concern error if one occurs
+ //
- BatchedCommandResponse response;
- buildErrResponse( ErrorCodes::UnknownError, "message", &response );
+ NamespaceString nss("foo.bar");
+ ShardEndpoint endpoint("shard", ChunkVersion::IGNORED());
+ MockNSTargeter targeter;
+ initTargeterFullRange(nss, endpoint, &targeter);
+
+ BatchedCommandRequest request(BatchedCommandRequest::BatchType_Insert);
+ request.setNS(nss.ns());
+ request.getInsertRequest()->addToDocuments(BSON("x" << 1));
+ request.setWriteConcern(BSON("w" << 3));
+
+ BatchWriteOp batchOp;
+ batchOp.initClientRequest(&request);
+ ASSERT(!batchOp.isFinished());
+
+ OwnedPointerVector<TargetedWriteBatch> targetedOwned;
+ vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
+ Status status = batchOp.targetBatch(targeter, false, &targeted);
+
+ ASSERT(status.isOK());
+ ASSERT(!batchOp.isFinished());
+ ASSERT_EQUALS(targeted.size(), 1u);
+ assertEndpointsEqual(targeted.front()->getEndpoint(), endpoint);
+
+ BatchedCommandRequest targetBatch(BatchedCommandRequest::BatchType_Insert);
+ batchOp.buildBatchRequest(*targeted.front(), &targetBatch);
+ ASSERT(targetBatch.getWriteConcern().woCompare(request.getWriteConcern()) == 0);
+
+ BatchedCommandResponse response;
+ buildResponse(1, &response);
+ addWCError(&response);
+
+ // First stale response comes back, we should retry
+ batchOp.noteBatchResponse(*targeted.front(), response, NULL);
+ ASSERT(batchOp.isFinished());
+
+ BatchedCommandResponse clientResponse;
+ batchOp.buildClientResponse(&clientResponse);
+ ASSERT(clientResponse.getOk());
+ ASSERT_EQUALS(clientResponse.getN(), 1);
+ ASSERT(!clientResponse.isErrDetailsSet());
+ ASSERT(clientResponse.isWriteConcernErrorSet());
+}
+
+TEST(WriteOpTests, SingleStaleError) {
+ //
+ // Single-op stale version test
+ // We should retry the same batch until we're not stale
+ //
- batchOp.noteBatchResponse( *targeted.front(), response, NULL );
- ASSERT( batchOp.isFinished() );
+ NamespaceString nss("foo.bar");
+ ShardEndpoint endpoint("shard", ChunkVersion::IGNORED());
+ MockNSTargeter targeter;
+ initTargeterFullRange(nss, endpoint, &targeter);
- BatchedCommandResponse clientResponse;
- batchOp.buildClientResponse( &clientResponse );
+ BatchedCommandRequest request(BatchedCommandRequest::BatchType_Insert);
+ request.setNS(nss.ns());
+ request.getInsertRequest()->addToDocuments(BSON("x" << 1));
- ASSERT( clientResponse.getOk() );
- ASSERT_EQUALS( clientResponse.sizeErrDetails(), 1u );
- ASSERT_EQUALS( clientResponse.getErrDetailsAt( 0 )->getErrCode(), response.getErrCode() );
- ASSERT( clientResponse.getErrDetailsAt( 0 )->getErrMessage()
- .find( response.getErrMessage()) != string::npos );
- ASSERT_EQUALS( clientResponse.getN(), 0 );
- }
+ BatchWriteOp batchOp;
+ batchOp.initClientRequest(&request);
- TEST(WriteOpTests, SingleTargetError) {
+ OwnedPointerVector<TargetedWriteBatch> targetedOwned;
+ vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
+ Status status = batchOp.targetBatch(targeter, false, &targeted);
- //
- // Single-op targeting error test
- //
+ BatchedCommandResponse response;
+ buildResponse(0, &response);
+ addError(ErrorCodes::StaleShardVersion, "mock stale error", 0, &response);
- NamespaceString nss( "foo.bar" );
- ShardEndpoint endpoint( "shard", ChunkVersion::IGNORED() );
- MockNSTargeter targeter;
- initTargeterHalfRange( nss, endpoint, &targeter );
+ // First stale response comes back, we should retry
+ batchOp.noteBatchResponse(*targeted.front(), response, NULL);
+ ASSERT(!batchOp.isFinished());
- // Do untargetable delete op
- BatchedCommandRequest request( BatchedCommandRequest::BatchType_Delete );
- request.setNS( nss.ns() );
- request.getDeleteRequest()->addToDeletes( buildDelete( BSON( "x" << 1 ), 1 ) );
+ targetedOwned.clear();
+ status = batchOp.targetBatch(targeter, false, &targeted);
- BatchWriteOp batchOp;
- batchOp.initClientRequest( &request );
- ASSERT( !batchOp.isFinished() );
+ // Respond again with a stale response
+ batchOp.noteBatchResponse(*targeted.front(), response, NULL);
+ ASSERT(!batchOp.isFinished());
- OwnedPointerVector<TargetedWriteBatch> targetedOwned;
- vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch( targeter, false, &targeted );
+ targetedOwned.clear();
+ status = batchOp.targetBatch(targeter, false, &targeted);
- ASSERT( !status.isOK() );
- ASSERT( !batchOp.isFinished() );
- ASSERT_EQUALS( targeted.size(), 0u );
+ buildResponse(1, &response);
- // Record targeting failures
- status = batchOp.targetBatch( targeter, true, &targeted );
+ // Respond with an 'ok' response
+ batchOp.noteBatchResponse(*targeted.front(), response, NULL);
+ ASSERT(batchOp.isFinished());
- ASSERT( status.isOK() );
- ASSERT( batchOp.isFinished() );
- ASSERT_EQUALS( targeted.size(), 0u );
+ BatchedCommandResponse clientResponse;
+ batchOp.buildClientResponse(&clientResponse);
+ ASSERT(clientResponse.getOk());
+ ASSERT_EQUALS(clientResponse.getN(), 1);
+ ASSERT(!clientResponse.isErrDetailsSet());
+}
- BatchedCommandResponse clientResponse;
- batchOp.buildClientResponse( &clientResponse );
- ASSERT( clientResponse.getOk() );
- ASSERT_EQUALS( clientResponse.getN(), 0 );
- ASSERT_EQUALS( clientResponse.sizeErrDetails(), 1u );
- }
+//
+// Multi-operation batches
+//
- TEST(WriteOpTests, SingleWriteConcernErrorOrdered) {
-
- //
- // Write concern error test - we should pass write concern to sub-batches, and pass up the
- // write concern error if one occurs
- //
-
- NamespaceString nss( "foo.bar" );
- ShardEndpoint endpoint( "shard", ChunkVersion::IGNORED() );
- MockNSTargeter targeter;
- initTargeterFullRange( nss, endpoint, &targeter );
-
- BatchedCommandRequest request( BatchedCommandRequest::BatchType_Insert );
- request.setNS( nss.ns() );
- request.getInsertRequest()->addToDocuments( BSON( "x" << 1 ) );
- request.setWriteConcern( BSON( "w" << 3 ) );
-
- BatchWriteOp batchOp;
- batchOp.initClientRequest( &request );
- ASSERT( !batchOp.isFinished() );
-
- OwnedPointerVector<TargetedWriteBatch> targetedOwned;
- vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch( targeter, false, &targeted );
-
- ASSERT( status.isOK() );
- ASSERT( !batchOp.isFinished() );
- ASSERT_EQUALS( targeted.size(), 1u );
- assertEndpointsEqual( targeted.front()->getEndpoint(), endpoint );
-
- BatchedCommandRequest targetBatch( BatchedCommandRequest::BatchType_Insert );
- batchOp.buildBatchRequest( *targeted.front(), &targetBatch );
- ASSERT( targetBatch.getWriteConcern().woCompare( request.getWriteConcern() ) == 0 );
-
- BatchedCommandResponse response;
- buildResponse( 1, &response );
- addWCError( &response );
-
- // First stale response comes back, we should retry
- batchOp.noteBatchResponse( *targeted.front(), response, NULL );
- ASSERT( batchOp.isFinished() );
-
- BatchedCommandResponse clientResponse;
- batchOp.buildClientResponse( &clientResponse );
- ASSERT( clientResponse.getOk() );
- ASSERT_EQUALS( clientResponse.getN(), 1 );
- ASSERT( !clientResponse.isErrDetailsSet() );
- ASSERT( clientResponse.isWriteConcernErrorSet() );
+struct EndpointComp {
+ bool operator()(const TargetedWriteBatch* writeA, const TargetedWriteBatch* writeB) const {
+ return writeA->getEndpoint().shardName.compare(writeB->getEndpoint().shardName) < 0;
}
+};
- TEST(WriteOpTests, SingleStaleError) {
-
- //
- // Single-op stale version test
- // We should retry the same batch until we're not stale
- //
-
- NamespaceString nss( "foo.bar" );
- ShardEndpoint endpoint( "shard", ChunkVersion::IGNORED() );
- MockNSTargeter targeter;
- initTargeterFullRange( nss, endpoint, &targeter );
-
- BatchedCommandRequest request( BatchedCommandRequest::BatchType_Insert );
- request.setNS( nss.ns() );
- request.getInsertRequest()->addToDocuments( BSON( "x" << 1 ) );
-
- BatchWriteOp batchOp;
- batchOp.initClientRequest( &request );
-
- OwnedPointerVector<TargetedWriteBatch> targetedOwned;
- vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch( targeter, false, &targeted );
-
- BatchedCommandResponse response;
- buildResponse( 0, &response );
- addError( ErrorCodes::StaleShardVersion, "mock stale error", 0, &response );
-
- // First stale response comes back, we should retry
- batchOp.noteBatchResponse( *targeted.front(), response, NULL );
- ASSERT( !batchOp.isFinished() );
-
- targetedOwned.clear();
- status = batchOp.targetBatch( targeter, false, &targeted );
-
- // Respond again with a stale response
- batchOp.noteBatchResponse( *targeted.front(), response, NULL );
- ASSERT( !batchOp.isFinished() );
-
- targetedOwned.clear();
- status = batchOp.targetBatch( targeter, false, &targeted );
-
- buildResponse( 1, &response );
-
- // Respond with an 'ok' response
- batchOp.noteBatchResponse( *targeted.front(), response, NULL );
- ASSERT( batchOp.isFinished() );
-
- BatchedCommandResponse clientResponse;
- batchOp.buildClientResponse( &clientResponse );
- ASSERT( clientResponse.getOk() );
- ASSERT_EQUALS( clientResponse.getN(), 1 );
- ASSERT( !clientResponse.isErrDetailsSet() );
- }
+inline void sortByEndpoint(vector<TargetedWriteBatch*>* writes) {
+ std::sort(writes->begin(), writes->end(), EndpointComp());
+}
+TEST(WriteOpTests, MultiOpSameShardOrdered) {
//
- // Multi-operation batches
+ // Multi-op targeting test (ordered)
//
- struct EndpointComp {
- bool operator()( const TargetedWriteBatch* writeA,
- const TargetedWriteBatch* writeB ) const {
- return writeA->getEndpoint().shardName.compare( writeB->getEndpoint().shardName ) < 0;
- }
- };
-
- inline void sortByEndpoint( vector<TargetedWriteBatch*>* writes ) {
- std::sort( writes->begin(), writes->end(), EndpointComp() );
- }
-
- TEST(WriteOpTests, MultiOpSameShardOrdered) {
-
- //
- // Multi-op targeting test (ordered)
- //
-
- NamespaceString nss( "foo.bar" );
- ShardEndpoint endpoint( "shard", ChunkVersion::IGNORED() );
- MockNSTargeter targeter;
- initTargeterFullRange( nss, endpoint, &targeter );
-
- // Do single-target, multi-doc batch write op
- BatchedCommandRequest request( BatchedCommandRequest::BatchType_Update );
- request.setNS( nss.ns() );
- request.setOrdered( true );
- request.getUpdateRequest()->addToUpdates( buildUpdate( BSON( "x" << 1 ), false ) );
- request.getUpdateRequest()->addToUpdates( buildUpdate( BSON( "x" << 2 ), false ) );
+ NamespaceString nss("foo.bar");
+ ShardEndpoint endpoint("shard", ChunkVersion::IGNORED());
+ MockNSTargeter targeter;
+ initTargeterFullRange(nss, endpoint, &targeter);
+
+ // Do single-target, multi-doc batch write op
+ BatchedCommandRequest request(BatchedCommandRequest::BatchType_Update);
+ request.setNS(nss.ns());
+ request.setOrdered(true);
+ request.getUpdateRequest()->addToUpdates(buildUpdate(BSON("x" << 1), false));
+ request.getUpdateRequest()->addToUpdates(buildUpdate(BSON("x" << 2), false));
+
+ BatchWriteOp batchOp;
+ batchOp.initClientRequest(&request);
+ ASSERT(!batchOp.isFinished());
+
+ OwnedPointerVector<TargetedWriteBatch> targetedOwned;
+ vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
+ Status status = batchOp.targetBatch(targeter, false, &targeted);
+
+ ASSERT(status.isOK());
+ ASSERT(!batchOp.isFinished());
+ ASSERT_EQUALS(targeted.size(), 1u);
+ ASSERT_EQUALS(targeted.front()->getWrites().size(), 2u);
+ assertEndpointsEqual(targeted.front()->getEndpoint(), endpoint);
+
+ BatchedCommandResponse response;
+ buildResponse(2, &response);
+
+ batchOp.noteBatchResponse(*targeted.front(), response, NULL);
+ ASSERT(batchOp.isFinished());
+
+ BatchedCommandResponse clientResponse;
+ batchOp.buildClientResponse(&clientResponse);
+ ASSERT(clientResponse.getOk());
+ ASSERT_EQUALS(clientResponse.getN(), 2);
+}
+
+TEST(WriteOpTests, MultiOpSameShardUnordered) {
+ //
+ // Multi-op targeting test (unordered)
+ //
- BatchWriteOp batchOp;
- batchOp.initClientRequest( &request );
- ASSERT( !batchOp.isFinished() );
+ NamespaceString nss("foo.bar");
+ ShardEndpoint endpoint("shard", ChunkVersion::IGNORED());
+ MockNSTargeter targeter;
+ initTargeterFullRange(nss, endpoint, &targeter);
+
+ // Do single-target, multi-doc batch write op
+ BatchedCommandRequest request(BatchedCommandRequest::BatchType_Update);
+ request.setNS(nss.ns());
+ request.setOrdered(false);
+ request.getUpdateRequest()->addToUpdates(buildUpdate(BSON("x" << 1), false));
+ request.getUpdateRequest()->addToUpdates(buildUpdate(BSON("x" << 2), false));
+
+ BatchWriteOp batchOp;
+ batchOp.initClientRequest(&request);
+ ASSERT(!batchOp.isFinished());
+
+ OwnedPointerVector<TargetedWriteBatch> targetedOwned;
+ vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
+ Status status = batchOp.targetBatch(targeter, false, &targeted);
+
+ ASSERT(status.isOK());
+ ASSERT(!batchOp.isFinished());
+ ASSERT_EQUALS(targeted.size(), 1u);
+ ASSERT_EQUALS(targeted.front()->getWrites().size(), 2u);
+ assertEndpointsEqual(targeted.front()->getEndpoint(), endpoint);
+
+ BatchedCommandResponse response;
+ buildResponse(2, &response);
+
+ batchOp.noteBatchResponse(*targeted.front(), response, NULL);
+ ASSERT(batchOp.isFinished());
+
+ BatchedCommandResponse clientResponse;
+ batchOp.buildClientResponse(&clientResponse);
+ ASSERT(clientResponse.getOk());
+ ASSERT_EQUALS(clientResponse.getN(), 2);
+}
+
+TEST(WriteOpTests, MultiOpTwoShardsOrdered) {
+ //
+    // Multi-op, multi-endpoint targeting test (ordered)
+ // There should be two sets of single batches (one to each shard, one-by-one)
+ //
- OwnedPointerVector<TargetedWriteBatch> targetedOwned;
- vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch( targeter, false, &targeted );
+ NamespaceString nss("foo.bar");
+ ShardEndpoint endpointA("shardA", ChunkVersion::IGNORED());
+ ShardEndpoint endpointB("shardB", ChunkVersion::IGNORED());
+ MockNSTargeter targeter;
+ initTargeterSplitRange(nss, endpointA, endpointB, &targeter);
+
+ // Do multi-target, multi-doc batch write op
+ BatchedCommandRequest request(BatchedCommandRequest::BatchType_Insert);
+ request.setNS(nss.ns());
+ request.setOrdered(true);
+ request.getInsertRequest()->addToDocuments(BSON("x" << -1));
+ request.getInsertRequest()->addToDocuments(BSON("x" << 1));
+
+ BatchWriteOp batchOp;
+ batchOp.initClientRequest(&request);
+ ASSERT(!batchOp.isFinished());
+
+ OwnedPointerVector<TargetedWriteBatch> targetedOwned;
+ vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
+ Status status = batchOp.targetBatch(targeter, false, &targeted);
+
+ ASSERT(status.isOK());
+ ASSERT(!batchOp.isFinished());
+ ASSERT_EQUALS(targeted.size(), 1u);
+ ASSERT_EQUALS(targeted.front()->getWrites().size(), 1u);
+ assertEndpointsEqual(targeted.front()->getEndpoint(), endpointA);
+
+ BatchedCommandResponse response;
+ buildResponse(1, &response);
+
+ // Respond to first targeted batch
+ batchOp.noteBatchResponse(*targeted.front(), response, NULL);
+ ASSERT(!batchOp.isFinished());
+
+ targetedOwned.clear();
+ status = batchOp.targetBatch(targeter, false, &targeted);
+ ASSERT(status.isOK());
+ ASSERT(!batchOp.isFinished());
+ ASSERT_EQUALS(targeted.size(), 1u);
+ ASSERT_EQUALS(targeted.front()->getWrites().size(), 1u);
+ assertEndpointsEqual(targeted.front()->getEndpoint(), endpointB);
+
+ // Respond to second targeted batch
+ batchOp.noteBatchResponse(*targeted.front(), response, NULL);
+ ASSERT(batchOp.isFinished());
+
+ BatchedCommandResponse clientResponse;
+ batchOp.buildClientResponse(&clientResponse);
+ ASSERT(clientResponse.getOk());
+ ASSERT_EQUALS(clientResponse.getN(), 2);
+}
+
+TEST(WriteOpTests, MultiOpTwoShardsUnordered) {
+ //
+ // Multi-op, multi-endpoint targeting test (unordered)
+ // There should be one set of two batches (one to each shard)
+ //
- ASSERT( status.isOK() );
- ASSERT( !batchOp.isFinished() );
- ASSERT_EQUALS( targeted.size(), 1u );
- ASSERT_EQUALS( targeted.front()->getWrites().size(), 2u );
- assertEndpointsEqual( targeted.front()->getEndpoint(), endpoint );
+ NamespaceString nss("foo.bar");
+ ShardEndpoint endpointA("shardA", ChunkVersion::IGNORED());
+ ShardEndpoint endpointB("shardB", ChunkVersion::IGNORED());
+ MockNSTargeter targeter;
+ initTargeterSplitRange(nss, endpointA, endpointB, &targeter);
+
+ // Do multi-target, multi-doc batch write op
+ BatchedCommandRequest request(BatchedCommandRequest::BatchType_Insert);
+ request.setNS(nss.ns());
+ request.setOrdered(false);
+ request.getInsertRequest()->addToDocuments(BSON("x" << -1));
+ request.getInsertRequest()->addToDocuments(BSON("x" << 1));
+
+ BatchWriteOp batchOp;
+ batchOp.initClientRequest(&request);
+ ASSERT(!batchOp.isFinished());
+
+ OwnedPointerVector<TargetedWriteBatch> targetedOwned;
+ vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
+ Status status = batchOp.targetBatch(targeter, false, &targeted);
+
+ ASSERT(status.isOK());
+ ASSERT(!batchOp.isFinished());
+ ASSERT_EQUALS(targeted.size(), 2u);
+ sortByEndpoint(&targeted);
+ ASSERT_EQUALS(targeted.front()->getWrites().size(), 1u);
+ assertEndpointsEqual(targeted.front()->getEndpoint(), endpointA);
+ ASSERT_EQUALS(targeted.back()->getWrites().size(), 1u);
+ assertEndpointsEqual(targeted.back()->getEndpoint(), endpointB);
+
+ BatchedCommandResponse response;
+ buildResponse(1, &response);
+
+ // Respond to both targeted batches
+ batchOp.noteBatchResponse(*targeted.front(), response, NULL);
+ ASSERT(!batchOp.isFinished());
+ batchOp.noteBatchResponse(*targeted.back(), response, NULL);
+ ASSERT(batchOp.isFinished());
+
+ BatchedCommandResponse clientResponse;
+ batchOp.buildClientResponse(&clientResponse);
+ ASSERT(clientResponse.getOk());
+ ASSERT_EQUALS(clientResponse.getN(), 2);
+}
+
+TEST(WriteOpTests, MultiOpTwoShardsEachOrdered) {
+ //
+ // Multi-op (ordered) targeting test where each op goes to both shards
+ // There should be two sets of two batches to each shard (two for each delete op)
+ //
- BatchedCommandResponse response;
- buildResponse( 2, &response );
+ NamespaceString nss("foo.bar");
+ ShardEndpoint endpointA("shardA", ChunkVersion::IGNORED());
+ ShardEndpoint endpointB("shardB", ChunkVersion::IGNORED());
+ MockNSTargeter targeter;
+ initTargeterSplitRange(nss, endpointA, endpointB, &targeter);
+
+ // Do multi-target, multi-doc batch write op
+ BatchedCommandRequest request(BatchedCommandRequest::BatchType_Delete);
+ request.setNS(nss.ns());
+ request.setOrdered(true);
+ BSONObj queryA = BSON("x" << GTE << -1 << LT << 2);
+ request.getDeleteRequest()->addToDeletes(buildDelete(queryA, 0));
+ BSONObj queryB = BSON("x" << GTE << -2 << LT << 1);
+ request.getDeleteRequest()->addToDeletes(buildDelete(queryB, 0));
+
+ BatchWriteOp batchOp;
+ batchOp.initClientRequest(&request);
+ ASSERT(!batchOp.isFinished());
+
+ OwnedPointerVector<TargetedWriteBatch> targetedOwned;
+ vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
+ Status status = batchOp.targetBatch(targeter, false, &targeted);
+
+ ASSERT(status.isOK());
+ ASSERT(!batchOp.isFinished());
+ ASSERT_EQUALS(targeted.size(), 2u);
+ sortByEndpoint(&targeted);
+ ASSERT_EQUALS(targeted.front()->getWrites().size(), 1u);
+ ASSERT_EQUALS(targeted.back()->getWrites().size(), 1u);
+ assertEndpointsEqual(targeted.front()->getEndpoint(), endpointA);
+ assertEndpointsEqual(targeted.back()->getEndpoint(), endpointB);
+
+ BatchedCommandResponse response;
+ buildResponse(1, &response);
+
+ // Respond to both targeted batches for first multi-delete
+ batchOp.noteBatchResponse(*targeted.front(), response, NULL);
+ ASSERT(!batchOp.isFinished());
+ batchOp.noteBatchResponse(*targeted.back(), response, NULL);
+ ASSERT(!batchOp.isFinished());
+
+ targetedOwned.clear();
+ status = batchOp.targetBatch(targeter, false, &targeted);
+ ASSERT(status.isOK());
+ ASSERT(!batchOp.isFinished());
+ ASSERT_EQUALS(targeted.size(), 2u);
+ sortByEndpoint(&targeted);
+ ASSERT_EQUALS(targeted.front()->getWrites().size(), 1u);
+ ASSERT_EQUALS(targeted.back()->getWrites().size(), 1u);
+ assertEndpointsEqual(targeted.front()->getEndpoint(), endpointA);
+ assertEndpointsEqual(targeted.back()->getEndpoint(), endpointB);
+
+ // Respond to second targeted batches for second multi-delete
+ batchOp.noteBatchResponse(*targeted.front(), response, NULL);
+ ASSERT(!batchOp.isFinished());
+ batchOp.noteBatchResponse(*targeted.back(), response, NULL);
+ ASSERT(batchOp.isFinished());
+
+ BatchedCommandResponse clientResponse;
+ batchOp.buildClientResponse(&clientResponse);
+ ASSERT(clientResponse.getOk());
+ ASSERT_EQUALS(clientResponse.getN(), 4);
+}
+
+TEST(WriteOpTests, MultiOpTwoShardsEachUnordered) {
+ //
+    // Multi-op (unordered) targeting test where each op goes to both shards
+ // There should be one set of two batches to each shard (containing writes for both ops)
+ //
- batchOp.noteBatchResponse( *targeted.front(), response, NULL );
- ASSERT( batchOp.isFinished() );
+ NamespaceString nss("foo.bar");
+ ShardEndpoint endpointA("shardA", ChunkVersion::IGNORED());
+ ShardEndpoint endpointB("shardB", ChunkVersion::IGNORED());
+ MockNSTargeter targeter;
+ initTargeterSplitRange(nss, endpointA, endpointB, &targeter);
+
+ // Do multi-target, multi-doc batch write op
+ BatchedCommandRequest request(BatchedCommandRequest::BatchType_Update);
+ request.setNS(nss.ns());
+ request.setOrdered(false);
+ BSONObj queryA = BSON("x" << GTE << -1 << LT << 2);
+ request.getUpdateRequest()->addToUpdates(buildUpdate(queryA, true));
+ BSONObj queryB = BSON("x" << GTE << -2 << LT << 1);
+ request.getUpdateRequest()->addToUpdates(buildUpdate(queryB, true));
+
+ BatchWriteOp batchOp;
+ batchOp.initClientRequest(&request);
+ ASSERT(!batchOp.isFinished());
+
+ OwnedPointerVector<TargetedWriteBatch> targetedOwned;
+ vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
+ Status status = batchOp.targetBatch(targeter, false, &targeted);
+
+ ASSERT(status.isOK());
+ ASSERT(!batchOp.isFinished());
+ ASSERT_EQUALS(targeted.size(), 2u);
+ sortByEndpoint(&targeted);
+ ASSERT_EQUALS(targeted.front()->getWrites().size(), 2u);
+ assertEndpointsEqual(targeted.front()->getEndpoint(), endpointA);
+ ASSERT_EQUALS(targeted.back()->getWrites().size(), 2u);
+ assertEndpointsEqual(targeted.back()->getEndpoint(), endpointB);
+
+ BatchedCommandResponse response;
+ buildResponse(2, &response);
+
+ // Respond to both targeted batches, each containing two ops
+ batchOp.noteBatchResponse(*targeted.front(), response, NULL);
+ ASSERT(!batchOp.isFinished());
+ batchOp.noteBatchResponse(*targeted.back(), response, NULL);
+ ASSERT(batchOp.isFinished());
+
+ BatchedCommandResponse clientResponse;
+ batchOp.buildClientResponse(&clientResponse);
+ ASSERT(clientResponse.getOk());
+ ASSERT_EQUALS(clientResponse.getN(), 4);
+}
+
+TEST(WriteOpTests, MultiOpOneOrTwoShardsOrdered) {
+ //
+ // Multi-op (ordered) targeting test where first two ops go to one shard, second two ops
+ // go to two shards.
+ // Should batch the first two ops, then second ops should be batched separately, then
+ // last ops should be batched together
+ //
- BatchedCommandResponse clientResponse;
- batchOp.buildClientResponse( &clientResponse );
- ASSERT( clientResponse.getOk() );
- ASSERT_EQUALS( clientResponse.getN(), 2 );
- }
+ NamespaceString nss("foo.bar");
+ ShardEndpoint endpointA("shardA", ChunkVersion::IGNORED());
+ ShardEndpoint endpointB("shardB", ChunkVersion::IGNORED());
+ MockNSTargeter targeter;
+ initTargeterSplitRange(nss, endpointA, endpointB, &targeter);
+
+ BatchedCommandRequest request(BatchedCommandRequest::BatchType_Delete);
+ request.setNS(nss.ns());
+ request.setOrdered(true);
+ // These go to the same shard
+ request.getDeleteRequest()->addToDeletes(buildDelete(BSON("x" << -1), 1));
+ request.getDeleteRequest()->addToDeletes(buildDelete(BSON("x" << -2), 1));
+ // These go to both shards
+ BSONObj queryA = BSON("x" << GTE << -1 << LT << 2);
+ request.getDeleteRequest()->addToDeletes(buildDelete(queryA, 0));
+ BSONObj queryB = BSON("x" << GTE << -2 << LT << 1);
+ request.getDeleteRequest()->addToDeletes(buildDelete(queryB, 0));
+ // These go to the same shard
+ request.getDeleteRequest()->addToDeletes(buildDelete(BSON("x" << 1), 1));
+ request.getDeleteRequest()->addToDeletes(buildDelete(BSON("x" << 2), 1));
+
+ BatchWriteOp batchOp;
+ batchOp.initClientRequest(&request);
+ ASSERT(!batchOp.isFinished());
+
+ OwnedPointerVector<TargetedWriteBatch> targetedOwned;
+ vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
+ Status status = batchOp.targetBatch(targeter, false, &targeted);
+
+ ASSERT(status.isOK());
+ ASSERT(!batchOp.isFinished());
+ ASSERT_EQUALS(targeted.size(), 1u);
+ ASSERT_EQUALS(targeted.front()->getWrites().size(), 2u);
+ assertEndpointsEqual(targeted.front()->getEndpoint(), endpointA);
+
+ BatchedCommandResponse response;
+ // Emulate one-write-per-delete-per-host
+ buildResponse(2, &response);
+
+ // Respond to first targeted batch containing the two single-host deletes
+ batchOp.noteBatchResponse(*targeted.front(), response, NULL);
+ ASSERT(!batchOp.isFinished());
+
+ targetedOwned.clear();
+ status = batchOp.targetBatch(targeter, false, &targeted);
+
+ ASSERT(status.isOK());
+ ASSERT(!batchOp.isFinished());
+ ASSERT_EQUALS(targeted.size(), 2u);
+ sortByEndpoint(&targeted);
+ ASSERT_EQUALS(targeted.front()->getWrites().size(), 1u);
+ ASSERT_EQUALS(targeted.back()->getWrites().size(), 1u);
+ assertEndpointsEqual(targeted.front()->getEndpoint(), endpointA);
+ assertEndpointsEqual(targeted.back()->getEndpoint(), endpointB);
+
+ // Emulate one-write-per-delete-per-host
+ buildResponse(1, &response);
+
+ // Respond to two targeted batches for first multi-delete
+ batchOp.noteBatchResponse(*targeted.front(), response, NULL);
+ ASSERT(!batchOp.isFinished());
+ batchOp.noteBatchResponse(*targeted.back(), response, NULL);
+ ASSERT(!batchOp.isFinished());
+
+ targetedOwned.clear();
+ status = batchOp.targetBatch(targeter, false, &targeted);
+
+ ASSERT(status.isOK());
+ ASSERT(!batchOp.isFinished());
+ ASSERT_EQUALS(targeted.size(), 2u);
+ sortByEndpoint(&targeted);
+ ASSERT_EQUALS(targeted.front()->getWrites().size(), 1u);
+ ASSERT_EQUALS(targeted.back()->getWrites().size(), 1u);
+ assertEndpointsEqual(targeted.front()->getEndpoint(), endpointA);
+ assertEndpointsEqual(targeted.back()->getEndpoint(), endpointB);
+
+ // Respond to two targeted batches for second multi-delete
+ batchOp.noteBatchResponse(*targeted.front(), response, NULL);
+ ASSERT(!batchOp.isFinished());
+ batchOp.noteBatchResponse(*targeted.back(), response, NULL);
+ ASSERT(!batchOp.isFinished());
+
+ targetedOwned.clear();
+ status = batchOp.targetBatch(targeter, false, &targeted);
+
+ ASSERT(status.isOK());
+ ASSERT(!batchOp.isFinished());
+ ASSERT_EQUALS(targeted.size(), 1u);
+ ASSERT_EQUALS(targeted.front()->getWrites().size(), 2u);
+ assertEndpointsEqual(targeted.back()->getEndpoint(), endpointB);
+
+ // Emulate one-write-per-delete-per-host
+ buildResponse(2, &response);
+
+ // Respond to final targeted batch containing the last two single-host deletes
+ batchOp.noteBatchResponse(*targeted.front(), response, NULL);
+ ASSERT(batchOp.isFinished());
+
+ BatchedCommandResponse clientResponse;
+ batchOp.buildClientResponse(&clientResponse);
+ ASSERT(clientResponse.getOk());
+ ASSERT_EQUALS(clientResponse.getN(), 8);
+}
+
+TEST(WriteOpTests, MultiOpOneOrTwoShardsUnordered) {
+ //
+ // Multi-op (unordered) targeting test where first two ops go to one shard, second two ops
+ // go to two shards.
+ // Should batch all the ops together into two batches of four ops for each shard
+ //
- TEST(WriteOpTests, MultiOpSameShardUnordered) {
+ NamespaceString nss("foo.bar");
+ ShardEndpoint endpointA("shardA", ChunkVersion::IGNORED());
+ ShardEndpoint endpointB("shardB", ChunkVersion::IGNORED());
+ MockNSTargeter targeter;
+ initTargeterSplitRange(nss, endpointA, endpointB, &targeter);
+
+ BatchedCommandRequest request(BatchedCommandRequest::BatchType_Update);
+ request.setNS(nss.ns());
+ request.setOrdered(false);
+ // These go to the same shard
+ request.getUpdateRequest()->addToUpdates(buildUpdate(BSON("x" << -1), false));
+ request.getUpdateRequest()->addToUpdates(buildUpdate(BSON("x" << -2), false));
+ // These go to both shards
+ BSONObj queryA = BSON("x" << GTE << -1 << LT << 2);
+ request.getUpdateRequest()->addToUpdates(buildUpdate(queryA, true));
+ BSONObj queryB = BSON("x" << GTE << -2 << LT << 1);
+ request.getUpdateRequest()->addToUpdates(buildUpdate(queryB, true));
+ // These go to the same shard
+ request.getUpdateRequest()->addToUpdates(buildUpdate(BSON("x" << 1), false));
+ request.getUpdateRequest()->addToUpdates(buildUpdate(BSON("x" << 2), false));
+
+ BatchWriteOp batchOp;
+ batchOp.initClientRequest(&request);
+ ASSERT(!batchOp.isFinished());
+
+ OwnedPointerVector<TargetedWriteBatch> targetedOwned;
+ vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
+ Status status = batchOp.targetBatch(targeter, false, &targeted);
+
+ ASSERT(status.isOK());
+ ASSERT(!batchOp.isFinished());
+ ASSERT_EQUALS(targeted.size(), 2u);
+ sortByEndpoint(&targeted);
+ ASSERT_EQUALS(targeted.front()->getWrites().size(), 4u);
+ ASSERT_EQUALS(targeted.back()->getWrites().size(), 4u);
+ assertEndpointsEqual(targeted.front()->getEndpoint(), endpointA);
+ assertEndpointsEqual(targeted.back()->getEndpoint(), endpointB);
+
+ BatchedCommandResponse response;
+ // Emulate one-write-per-delete-per-host
+ buildResponse(4, &response);
+
+ // Respond to first targeted batch containing the two single-host deletes
+ batchOp.noteBatchResponse(*targeted.front(), response, NULL);
+ ASSERT(!batchOp.isFinished());
+ batchOp.noteBatchResponse(*targeted.back(), response, NULL);
+ ASSERT(batchOp.isFinished());
+
+ BatchedCommandResponse clientResponse;
+ batchOp.buildClientResponse(&clientResponse);
+ ASSERT(clientResponse.getOk());
+ ASSERT_EQUALS(clientResponse.getN(), 8);
+}
+
+TEST(WriteOpTests, MultiOpSingleShardErrorUnordered) {
+ //
+ // Multi-op targeting test where two ops go to two separate shards and there's an error on
+ // one op on one shard
+ // There should be one set of two batches to each shard and an error reported
+ //
- //
- // Multi-op targeting test (unordered)
- //
+ NamespaceString nss("foo.bar");
+ ShardEndpoint endpointA("shardA", ChunkVersion::IGNORED());
+ ShardEndpoint endpointB("shardB", ChunkVersion::IGNORED());
+ MockNSTargeter targeter;
+ initTargeterSplitRange(nss, endpointA, endpointB, &targeter);
+
+ BatchedCommandRequest request(BatchedCommandRequest::BatchType_Insert);
+ request.setNS(nss.ns());
+ request.setOrdered(false);
+ request.getInsertRequest()->addToDocuments(BSON("x" << -1));
+ request.getInsertRequest()->addToDocuments(BSON("x" << 1));
+
+ BatchWriteOp batchOp;
+ batchOp.initClientRequest(&request);
+ ASSERT(!batchOp.isFinished());
+
+ OwnedPointerVector<TargetedWriteBatch> targetedOwned;
+ vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
+ Status status = batchOp.targetBatch(targeter, false, &targeted);
+
+ ASSERT(status.isOK());
+ ASSERT(!batchOp.isFinished());
+ ASSERT_EQUALS(targeted.size(), 2u);
+ sortByEndpoint(&targeted);
+ assertEndpointsEqual(targeted.front()->getEndpoint(), endpointA);
+ assertEndpointsEqual(targeted.back()->getEndpoint(), endpointB);
+ ASSERT_EQUALS(targeted.front()->getWrites().size(), 1u);
+ ASSERT_EQUALS(targeted.back()->getWrites().size(), 1u);
+
+ BatchedCommandResponse response;
+ buildResponse(1, &response);
+
+ // No error on first shard
+ batchOp.noteBatchResponse(*targeted.front(), response, NULL);
+ ASSERT(!batchOp.isFinished());
+
+ buildResponse(0, &response);
+ addError(ErrorCodes::UnknownError, "mock error", 0, &response);
+
+ // Error on second write on second shard
+ batchOp.noteBatchResponse(*targeted.back(), response, NULL);
+ ASSERT(batchOp.isFinished());
+
+ BatchedCommandResponse clientResponse;
+ batchOp.buildClientResponse(&clientResponse);
+ ASSERT(clientResponse.getOk());
+ ASSERT_EQUALS(clientResponse.getN(), 1);
+ ASSERT(clientResponse.isErrDetailsSet());
+ ASSERT_EQUALS(clientResponse.sizeErrDetails(), 1u);
+ ASSERT_EQUALS(clientResponse.getErrDetailsAt(0)->getErrCode(),
+ response.getErrDetailsAt(0)->getErrCode());
+ ASSERT_EQUALS(clientResponse.getErrDetailsAt(0)->getErrMessage(),
+ response.getErrDetailsAt(0)->getErrMessage());
+ ASSERT_EQUALS(clientResponse.getErrDetailsAt(0)->getIndex(), 1);
+}
+
+TEST(WriteOpTests, MultiOpTwoShardErrorsUnordered) {
+ //
+ // Multi-op targeting test where two ops go to two separate shards and there's an error on
+ // each op on each shard
+    // There should be one set of two batches to each shard and two errors reported
+ //
- NamespaceString nss( "foo.bar" );
- ShardEndpoint endpoint( "shard", ChunkVersion::IGNORED() );
- MockNSTargeter targeter;
- initTargeterFullRange( nss, endpoint, &targeter );
+ NamespaceString nss("foo.bar");
+ ShardEndpoint endpointA("shardA", ChunkVersion::IGNORED());
+ ShardEndpoint endpointB("shardB", ChunkVersion::IGNORED());
+ MockNSTargeter targeter;
+ initTargeterSplitRange(nss, endpointA, endpointB, &targeter);
+
+ BatchedCommandRequest request(BatchedCommandRequest::BatchType_Insert);
+ request.setNS(nss.ns());
+ request.setOrdered(false);
+ request.getInsertRequest()->addToDocuments(BSON("x" << -1));
+ request.getInsertRequest()->addToDocuments(BSON("x" << 1));
+
+ BatchWriteOp batchOp;
+ batchOp.initClientRequest(&request);
+ ASSERT(!batchOp.isFinished());
+
+ OwnedPointerVector<TargetedWriteBatch> targetedOwned;
+ vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
+ Status status = batchOp.targetBatch(targeter, false, &targeted);
+
+ ASSERT(status.isOK());
+ ASSERT(!batchOp.isFinished());
+ ASSERT_EQUALS(targeted.size(), 2u);
+ sortByEndpoint(&targeted);
+ assertEndpointsEqual(targeted.front()->getEndpoint(), endpointA);
+ assertEndpointsEqual(targeted.back()->getEndpoint(), endpointB);
+ ASSERT_EQUALS(targeted.front()->getWrites().size(), 1u);
+ ASSERT_EQUALS(targeted.back()->getWrites().size(), 1u);
+
+ BatchedCommandResponse response;
+ buildResponse(0, &response);
+ addError(ErrorCodes::UnknownError, "mock error", 0, &response);
+
+ // Error on first write on first shard
+ batchOp.noteBatchResponse(*targeted.front(), response, NULL);
+ ASSERT(!batchOp.isFinished());
+
+ // Error on second write on second shard
+ batchOp.noteBatchResponse(*targeted.back(), response, NULL);
+ ASSERT(batchOp.isFinished());
+
+ BatchedCommandResponse clientResponse;
+ batchOp.buildClientResponse(&clientResponse);
+ ASSERT(clientResponse.getOk());
+ ASSERT_EQUALS(clientResponse.getN(), 0);
+ ASSERT(clientResponse.isErrDetailsSet());
+ ASSERT_EQUALS(clientResponse.sizeErrDetails(), 2u);
+ ASSERT_EQUALS(clientResponse.getErrDetailsAt(0)->getErrCode(),
+ response.getErrDetailsAt(0)->getErrCode());
+ ASSERT_EQUALS(clientResponse.getErrDetailsAt(0)->getErrMessage(),
+ response.getErrDetailsAt(0)->getErrMessage());
+ ASSERT_EQUALS(clientResponse.getErrDetailsAt(0)->getIndex(), 0);
+ ASSERT_EQUALS(clientResponse.getErrDetailsAt(1)->getErrCode(),
+ response.getErrDetailsAt(0)->getErrCode());
+ ASSERT_EQUALS(clientResponse.getErrDetailsAt(1)->getErrMessage(),
+ response.getErrDetailsAt(0)->getErrMessage());
+ ASSERT_EQUALS(clientResponse.getErrDetailsAt(1)->getIndex(), 1);
+}
+
+TEST(WriteOpTests, MultiOpPartialSingleShardErrorUnordered) {
+ //
+ // Multi-op targeting test where each op goes to both shards and there's an error on
+ // one op on one shard
+ // There should be one set of two batches to each shard and an error reported
+ //
- // Do single-target, multi-doc batch write op
- BatchedCommandRequest request( BatchedCommandRequest::BatchType_Update );
- request.setNS( nss.ns() );
- request.setOrdered( false );
- request.getUpdateRequest()->addToUpdates( buildUpdate( BSON( "x" << 1 ), false ) );
- request.getUpdateRequest()->addToUpdates( buildUpdate( BSON( "x" << 2 ), false ) );
+ NamespaceString nss("foo.bar");
+ ShardEndpoint endpointA("shardA", ChunkVersion::IGNORED());
+ ShardEndpoint endpointB("shardB", ChunkVersion::IGNORED());
+ MockNSTargeter targeter;
+ initTargeterSplitRange(nss, endpointA, endpointB, &targeter);
+
+ BatchedCommandRequest request(BatchedCommandRequest::BatchType_Delete);
+ request.setNS(nss.ns());
+ request.setOrdered(false);
+ BSONObj queryA = BSON("x" << GTE << -1 << LT << 2);
+ request.getDeleteRequest()->addToDeletes(buildDelete(queryA, 0));
+ BSONObj queryB = BSON("x" << GTE << -2 << LT << 1);
+ request.getDeleteRequest()->addToDeletes(buildDelete(queryB, 0));
+
+ BatchWriteOp batchOp;
+ batchOp.initClientRequest(&request);
+ ASSERT(!batchOp.isFinished());
+
+ OwnedPointerVector<TargetedWriteBatch> targetedOwned;
+ vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
+ Status status = batchOp.targetBatch(targeter, false, &targeted);
+
+ ASSERT(status.isOK());
+ ASSERT(!batchOp.isFinished());
+ ASSERT_EQUALS(targeted.size(), 2u);
+ sortByEndpoint(&targeted);
+ assertEndpointsEqual(targeted.front()->getEndpoint(), endpointA);
+ assertEndpointsEqual(targeted.back()->getEndpoint(), endpointB);
+ ASSERT_EQUALS(targeted.front()->getWrites().size(), 2u);
+ ASSERT_EQUALS(targeted.back()->getWrites().size(), 2u);
+
+ BatchedCommandResponse response;
+ buildResponse(2, &response);
+
+ // No errors on first shard
+ batchOp.noteBatchResponse(*targeted.front(), response, NULL);
+ ASSERT(!batchOp.isFinished());
+
+ buildResponse(1, &response);
+ addError(ErrorCodes::UnknownError, "mock error", 1, &response);
+
+ // Error on second write on second shard
+ batchOp.noteBatchResponse(*targeted.back(), response, NULL);
+ ASSERT(batchOp.isFinished());
+
+ BatchedCommandResponse clientResponse;
+ batchOp.buildClientResponse(&clientResponse);
+ ASSERT(clientResponse.getOk());
+ ASSERT_EQUALS(clientResponse.getN(), 3);
+ ASSERT(clientResponse.isErrDetailsSet());
+ ASSERT_EQUALS(clientResponse.sizeErrDetails(), 1u);
+ ASSERT_EQUALS(clientResponse.getErrDetailsAt(0)->getErrCode(),
+ response.getErrDetailsAt(0)->getErrCode());
+ ASSERT_EQUALS(clientResponse.getErrDetailsAt(0)->getErrMessage(),
+ response.getErrDetailsAt(0)->getErrMessage());
+ ASSERT_EQUALS(clientResponse.getErrDetailsAt(0)->getIndex(), 1);
+}
+
+TEST(WriteOpTests, MultiOpPartialSingleShardErrorOrdered) {
+ //
+ // Multi-op targeting test where each op goes to both shards and there's an error on
+ // one op on one shard
+ // There should be one set of two batches to each shard and an error reported, the second
+ // op should not get run
+ //
- BatchWriteOp batchOp;
- batchOp.initClientRequest( &request );
- ASSERT( !batchOp.isFinished() );
+ NamespaceString nss("foo.bar");
+ ShardEndpoint endpointA("shardA", ChunkVersion::IGNORED());
+ ShardEndpoint endpointB("shardB", ChunkVersion::IGNORED());
+ MockNSTargeter targeter;
+ initTargeterSplitRange(nss, endpointA, endpointB, &targeter);
+
+ BatchedCommandRequest request(BatchedCommandRequest::BatchType_Delete);
+ request.setNS(nss.ns());
+ request.setOrdered(true);
+ BSONObj queryA = BSON("x" << GTE << -1 << LT << 2);
+ request.getDeleteRequest()->addToDeletes(buildDelete(queryA, 0));
+ BSONObj queryB = BSON("x" << GTE << -2 << LT << 1);
+ request.getDeleteRequest()->addToDeletes(buildDelete(queryB, 0));
+
+ BatchWriteOp batchOp;
+ batchOp.initClientRequest(&request);
+ ASSERT(!batchOp.isFinished());
+
+ OwnedPointerVector<TargetedWriteBatch> targetedOwned;
+ vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
+ Status status = batchOp.targetBatch(targeter, false, &targeted);
+
+ ASSERT(status.isOK());
+ ASSERT(!batchOp.isFinished());
+ ASSERT_EQUALS(targeted.size(), 2u);
+ sortByEndpoint(&targeted);
+ assertEndpointsEqual(targeted.front()->getEndpoint(), endpointA);
+ assertEndpointsEqual(targeted.back()->getEndpoint(), endpointB);
+ ASSERT_EQUALS(targeted.front()->getWrites().size(), 1u);
+ ASSERT_EQUALS(targeted.back()->getWrites().size(), 1u);
+
+ BatchedCommandResponse response;
+ buildResponse(1, &response);
+
+ // No errors on first shard
+ batchOp.noteBatchResponse(*targeted.front(), response, NULL);
+ ASSERT(!batchOp.isFinished());
+
+ buildResponse(0, &response);
+ addError(ErrorCodes::UnknownError, "mock error", 0, &response);
+
+ // Error on second write on second shard
+ batchOp.noteBatchResponse(*targeted.back(), response, NULL);
+ ASSERT(batchOp.isFinished());
+
+ BatchedCommandResponse clientResponse;
+ batchOp.buildClientResponse(&clientResponse);
+ ASSERT(clientResponse.getOk());
+ ASSERT_EQUALS(clientResponse.getN(), 1);
+ ASSERT(clientResponse.isErrDetailsSet());
+ ASSERT_EQUALS(clientResponse.sizeErrDetails(), 1u);
+ ASSERT_EQUALS(clientResponse.getErrDetailsAt(0)->getErrCode(),
+ response.getErrDetailsAt(0)->getErrCode());
+ ASSERT_EQUALS(clientResponse.getErrDetailsAt(0)->getErrMessage(),
+ response.getErrDetailsAt(0)->getErrMessage());
+ ASSERT_EQUALS(clientResponse.getErrDetailsAt(0)->getIndex(), 0);
+}
+
+//
+// Tests of edge-case functionality, lifecycle is assumed to be behaving normally
+//
+
+TEST(WriteOpTests, MultiOpErrorAndWriteConcernErrorUnordered) {
+ //
+ // Multi-op (unordered) error and write concern error test
+ // We never report the write concern error for single-doc batches, since the error means
+ // there's no write concern applied.
+ // Don't suppress the error if ordered : false
+ //
- OwnedPointerVector<TargetedWriteBatch> targetedOwned;
- vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch( targeter, false, &targeted );
+ NamespaceString nss("foo.bar");
+ ShardEndpoint endpoint("shard", ChunkVersion::IGNORED());
+ MockNSTargeter targeter;
+ initTargeterFullRange(nss, endpoint, &targeter);
+
+ BatchedCommandRequest request(BatchedCommandRequest::BatchType_Insert);
+ request.setNS(nss.ns());
+ request.setOrdered(false);
+ request.getInsertRequest()->addToDocuments(BSON("x" << 1));
+ request.getInsertRequest()->addToDocuments(BSON("x" << 1));
+ request.setWriteConcern(BSON("w" << 3));
+
+ BatchWriteOp batchOp;
+ batchOp.initClientRequest(&request);
+
+ OwnedPointerVector<TargetedWriteBatch> targetedOwned;
+ vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
+ Status status = batchOp.targetBatch(targeter, false, &targeted);
+
+ BatchedCommandResponse response;
+ buildResponse(1, &response);
+ addError(ErrorCodes::UnknownError, "mock error", 1, &response);
+ addWCError(&response);
+
+ // First stale response comes back, we should retry
+ batchOp.noteBatchResponse(*targeted.front(), response, NULL);
+ ASSERT(batchOp.isFinished());
+
+ // Unordered reports write concern error
+ BatchedCommandResponse clientResponse;
+ batchOp.buildClientResponse(&clientResponse);
+ ASSERT(clientResponse.getOk());
+ ASSERT_EQUALS(clientResponse.getN(), 1);
+ ASSERT_EQUALS(clientResponse.sizeErrDetails(), 1u);
+ ASSERT(clientResponse.isWriteConcernErrorSet());
+}
+
+
+TEST(WriteOpTests, SingleOpErrorAndWriteConcernErrorOrdered) {
+ //
+ // Single-op (ordered) error and write concern error test
+ // Suppress the write concern error if ordered and we also have an error
+ //
- ASSERT( status.isOK() );
- ASSERT( !batchOp.isFinished() );
- ASSERT_EQUALS( targeted.size(), 1u );
- ASSERT_EQUALS( targeted.front()->getWrites().size(), 2u );
- assertEndpointsEqual( targeted.front()->getEndpoint(), endpoint );
+ NamespaceString nss("foo.bar");
+ ShardEndpoint endpointA("shardA", ChunkVersion::IGNORED());
+ ShardEndpoint endpointB("shardB", ChunkVersion::IGNORED());
+ MockNSTargeter targeter;
+ initTargeterSplitRange(nss, endpointA, endpointB, &targeter);
+
+ BatchedCommandRequest request(BatchedCommandRequest::BatchType_Update);
+ request.setNS(nss.ns());
+ request.setOrdered(true);
+ BSONObj query = BSON("x" << GTE << -1 << LT << 2);
+ request.getUpdateRequest()->addToUpdates(buildUpdate(query, true));
+ request.setWriteConcern(BSON("w" << 3));
+
+ BatchWriteOp batchOp;
+ batchOp.initClientRequest(&request);
+
+ OwnedPointerVector<TargetedWriteBatch> targetedOwned;
+ vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
+ Status status = batchOp.targetBatch(targeter, false, &targeted);
+
+ BatchedCommandResponse response;
+ buildResponse(1, &response);
+ addWCError(&response);
+
+ // First response comes back with write concern error
+ batchOp.noteBatchResponse(*targeted.front(), response, NULL);
+ ASSERT(!batchOp.isFinished());
+
+ buildResponse(0, &response);
+ addError(ErrorCodes::UnknownError, "mock error", 0, &response);
+
+ // Second response comes back with write error
+ batchOp.noteBatchResponse(*targeted.back(), response, NULL);
+ ASSERT(batchOp.isFinished());
+
+ // Ordered doesn't report write concern error
+ BatchedCommandResponse clientResponse;
+ batchOp.buildClientResponse(&clientResponse);
+ ASSERT(clientResponse.getOk());
+ ASSERT_EQUALS(clientResponse.getN(), 1);
+ ASSERT(clientResponse.isErrDetailsSet());
+ ASSERT_EQUALS(clientResponse.sizeErrDetails(), 1u);
+ ASSERT(!clientResponse.isWriteConcernErrorSet());
+}
+
+TEST(WriteOpTests, MultiOpFailedTargetOrdered) {
+ //
+ // Targeting failure on second op in batch op (ordered)
+ //
- BatchedCommandResponse response;
- buildResponse( 2, &response );
+ NamespaceString nss("foo.bar");
+ ShardEndpoint endpoint("shard", ChunkVersion::IGNORED());
+ MockNSTargeter targeter;
+ initTargeterHalfRange(nss, endpoint, &targeter);
- batchOp.noteBatchResponse( *targeted.front(), response, NULL );
- ASSERT( batchOp.isFinished() );
+ BatchedCommandRequest request(BatchedCommandRequest::BatchType_Insert);
+ request.setNS(nss.ns());
+ request.getInsertRequest()->addToDocuments(BSON("x" << -1));
+ request.getInsertRequest()->addToDocuments(BSON("x" << 2));
+ request.getInsertRequest()->addToDocuments(BSON("x" << -2));
- BatchedCommandResponse clientResponse;
- batchOp.buildClientResponse( &clientResponse );
- ASSERT( clientResponse.getOk() );
- ASSERT_EQUALS( clientResponse.getN(), 2 );
- }
+ // Do single-target, multi-doc batch write op
- TEST(WriteOpTests, MultiOpTwoShardsOrdered) {
-
- //
- // Multi-op, multi-endpoing targeting test (ordered)
- // There should be two sets of single batches (one to each shard, one-by-one)
- //
-
- NamespaceString nss( "foo.bar" );
- ShardEndpoint endpointA( "shardA", ChunkVersion::IGNORED() );
- ShardEndpoint endpointB( "shardB", ChunkVersion::IGNORED() );
- MockNSTargeter targeter;
- initTargeterSplitRange( nss, endpointA, endpointB, &targeter );
-
- // Do multi-target, multi-doc batch write op
- BatchedCommandRequest request( BatchedCommandRequest::BatchType_Insert );
- request.setNS( nss.ns() );
- request.setOrdered( true );
- request.getInsertRequest()->addToDocuments( BSON( "x" << -1 ) );
- request.getInsertRequest()->addToDocuments( BSON( "x" << 1 ) );
-
- BatchWriteOp batchOp;
- batchOp.initClientRequest( &request );
- ASSERT( !batchOp.isFinished() );
-
- OwnedPointerVector<TargetedWriteBatch> targetedOwned;
- vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch( targeter, false, &targeted );
-
- ASSERT( status.isOK() );
- ASSERT( !batchOp.isFinished() );
- ASSERT_EQUALS( targeted.size(), 1u );
- ASSERT_EQUALS( targeted.front()->getWrites().size(), 1u );
- assertEndpointsEqual( targeted.front()->getEndpoint(), endpointA );
-
- BatchedCommandResponse response;
- buildResponse( 1, &response );
-
- // Respond to first targeted batch
- batchOp.noteBatchResponse( *targeted.front(), response, NULL );
- ASSERT( !batchOp.isFinished() );
-
- targetedOwned.clear();
- status = batchOp.targetBatch( targeter, false, &targeted );
- ASSERT( status.isOK() );
- ASSERT( !batchOp.isFinished() );
- ASSERT_EQUALS( targeted.size(), 1u );
- ASSERT_EQUALS( targeted.front()->getWrites().size(), 1u );
- assertEndpointsEqual( targeted.front()->getEndpoint(), endpointB );
-
- // Respond to second targeted batch
- batchOp.noteBatchResponse( *targeted.front(), response, NULL );
- ASSERT( batchOp.isFinished() );
-
- BatchedCommandResponse clientResponse;
- batchOp.buildClientResponse( &clientResponse );
- ASSERT( clientResponse.getOk() );
- ASSERT_EQUALS( clientResponse.getN(), 2 );
- }
+ BatchWriteOp batchOp;
+ batchOp.initClientRequest(&request);
- TEST(WriteOpTests, MultiOpTwoShardsUnordered) {
-
- //
- // Multi-op, multi-endpoint targeting test (unordered)
- // There should be one set of two batches (one to each shard)
- //
-
- NamespaceString nss( "foo.bar" );
- ShardEndpoint endpointA( "shardA", ChunkVersion::IGNORED() );
- ShardEndpoint endpointB( "shardB", ChunkVersion::IGNORED() );
- MockNSTargeter targeter;
- initTargeterSplitRange( nss, endpointA, endpointB, &targeter );
-
- // Do multi-target, multi-doc batch write op
- BatchedCommandRequest request( BatchedCommandRequest::BatchType_Insert );
- request.setNS( nss.ns() );
- request.setOrdered( false );
- request.getInsertRequest()->addToDocuments( BSON( "x" << -1 ) );
- request.getInsertRequest()->addToDocuments( BSON( "x" << 1 ) );
-
- BatchWriteOp batchOp;
- batchOp.initClientRequest( &request );
- ASSERT( !batchOp.isFinished() );
-
- OwnedPointerVector<TargetedWriteBatch> targetedOwned;
- vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch( targeter, false, &targeted );
-
- ASSERT( status.isOK() );
- ASSERT( !batchOp.isFinished() );
- ASSERT_EQUALS( targeted.size(), 2u );
- sortByEndpoint( &targeted );
- ASSERT_EQUALS( targeted.front()->getWrites().size(), 1u );
- assertEndpointsEqual( targeted.front()->getEndpoint(), endpointA );
- ASSERT_EQUALS( targeted.back()->getWrites().size(), 1u );
- assertEndpointsEqual( targeted.back()->getEndpoint(), endpointB );
-
- BatchedCommandResponse response;
- buildResponse( 1, &response );
-
- // Respond to both targeted batches
- batchOp.noteBatchResponse( *targeted.front(), response, NULL );
- ASSERT( !batchOp.isFinished() );
- batchOp.noteBatchResponse( *targeted.back(), response, NULL );
- ASSERT( batchOp.isFinished() );
-
- BatchedCommandResponse clientResponse;
- batchOp.buildClientResponse( &clientResponse );
- ASSERT( clientResponse.getOk() );
- ASSERT_EQUALS( clientResponse.getN(), 2 );
- }
+ OwnedPointerVector<TargetedWriteBatch> targetedOwned;
+ vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
+ Status status = batchOp.targetBatch(targeter, false, &targeted);
- TEST(WriteOpTests, MultiOpTwoShardsEachOrdered) {
-
- //
- // Multi-op (ordered) targeting test where each op goes to both shards
- // There should be two sets of two batches to each shard (two for each delete op)
- //
-
- NamespaceString nss( "foo.bar" );
- ShardEndpoint endpointA( "shardA", ChunkVersion::IGNORED() );
- ShardEndpoint endpointB( "shardB", ChunkVersion::IGNORED() );
- MockNSTargeter targeter;
- initTargeterSplitRange( nss, endpointA, endpointB, &targeter );
-
- // Do multi-target, multi-doc batch write op
- BatchedCommandRequest request( BatchedCommandRequest::BatchType_Delete );
- request.setNS( nss.ns() );
- request.setOrdered( true );
- BSONObj queryA = BSON( "x" << GTE << -1 << LT << 2 );
- request.getDeleteRequest()->addToDeletes( buildDelete( queryA, 0 ) );
- BSONObj queryB = BSON( "x" << GTE << -2 << LT << 1 );
- request.getDeleteRequest()->addToDeletes( buildDelete( queryB, 0 ) );
-
- BatchWriteOp batchOp;
- batchOp.initClientRequest( &request );
- ASSERT( !batchOp.isFinished() );
-
- OwnedPointerVector<TargetedWriteBatch> targetedOwned;
- vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch( targeter, false, &targeted );
-
- ASSERT( status.isOK() );
- ASSERT( !batchOp.isFinished() );
- ASSERT_EQUALS( targeted.size(), 2u );
- sortByEndpoint( &targeted );
- ASSERT_EQUALS( targeted.front()->getWrites().size(), 1u );
- ASSERT_EQUALS( targeted.back()->getWrites().size(), 1u );
- assertEndpointsEqual( targeted.front()->getEndpoint(), endpointA );
- assertEndpointsEqual( targeted.back()->getEndpoint(), endpointB );
-
- BatchedCommandResponse response;
- buildResponse( 1, &response );
-
- // Respond to both targeted batches for first multi-delete
- batchOp.noteBatchResponse( *targeted.front(), response, NULL );
- ASSERT( !batchOp.isFinished() );
- batchOp.noteBatchResponse( *targeted.back(), response, NULL );
- ASSERT( !batchOp.isFinished() );
-
- targetedOwned.clear();
- status = batchOp.targetBatch( targeter, false, &targeted );
- ASSERT( status.isOK() );
- ASSERT( !batchOp.isFinished() );
- ASSERT_EQUALS( targeted.size(), 2u );
- sortByEndpoint( &targeted );
- ASSERT_EQUALS( targeted.front()->getWrites().size(), 1u );
- ASSERT_EQUALS( targeted.back()->getWrites().size(), 1u );
- assertEndpointsEqual( targeted.front()->getEndpoint(), endpointA );
- assertEndpointsEqual( targeted.back()->getEndpoint(), endpointB );
-
- // Respond to second targeted batches for second multi-delete
- batchOp.noteBatchResponse( *targeted.front(), response, NULL );
- ASSERT( !batchOp.isFinished() );
- batchOp.noteBatchResponse( *targeted.back(), response, NULL );
- ASSERT( batchOp.isFinished() );
-
- BatchedCommandResponse clientResponse;
- batchOp.buildClientResponse( &clientResponse );
- ASSERT( clientResponse.getOk() );
- ASSERT_EQUALS( clientResponse.getN(), 4 );
- }
+ // First targeting round fails since we may be stale
+ ASSERT(!status.isOK());
+ ASSERT(!batchOp.isFinished());
- TEST(WriteOpTests, MultiOpTwoShardsEachUnordered) {
-
- //
- // Multi-op (unaordered) targeting test where each op goes to both shards
- // There should be one set of two batches to each shard (containing writes for both ops)
- //
-
- NamespaceString nss( "foo.bar" );
- ShardEndpoint endpointA( "shardA", ChunkVersion::IGNORED() );
- ShardEndpoint endpointB( "shardB", ChunkVersion::IGNORED() );
- MockNSTargeter targeter;
- initTargeterSplitRange( nss, endpointA, endpointB, &targeter );
-
- // Do multi-target, multi-doc batch write op
- BatchedCommandRequest request( BatchedCommandRequest::BatchType_Update );
- request.setNS( nss.ns() );
- request.setOrdered( false );
- BSONObj queryA = BSON( "x" << GTE << -1 << LT << 2 );
- request.getUpdateRequest()->addToUpdates( buildUpdate( queryA, true ) );
- BSONObj queryB = BSON( "x" << GTE << -2 << LT << 1 );
- request.getUpdateRequest()->addToUpdates( buildUpdate( queryB, true ) );
-
- BatchWriteOp batchOp;
- batchOp.initClientRequest( &request );
- ASSERT( !batchOp.isFinished() );
-
- OwnedPointerVector<TargetedWriteBatch> targetedOwned;
- vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch( targeter, false, &targeted );
-
- ASSERT( status.isOK() );
- ASSERT( !batchOp.isFinished() );
- ASSERT_EQUALS( targeted.size(), 2u );
- sortByEndpoint( &targeted );
- ASSERT_EQUALS( targeted.front()->getWrites().size(), 2u );
- assertEndpointsEqual( targeted.front()->getEndpoint(), endpointA );
- ASSERT_EQUALS( targeted.back()->getWrites().size(), 2u );
- assertEndpointsEqual( targeted.back()->getEndpoint(), endpointB );
-
- BatchedCommandResponse response;
- buildResponse( 2, &response );
-
- // Respond to both targeted batches, each containing two ops
- batchOp.noteBatchResponse( *targeted.front(), response, NULL );
- ASSERT( !batchOp.isFinished() );
- batchOp.noteBatchResponse( *targeted.back(), response, NULL );
- ASSERT( batchOp.isFinished() );
-
- BatchedCommandResponse clientResponse;
- batchOp.buildClientResponse( &clientResponse );
- ASSERT( clientResponse.getOk() );
- ASSERT_EQUALS( clientResponse.getN(), 4 );
- }
+ targetedOwned.clear();
+ status = batchOp.targetBatch(targeter, true, &targeted);
- TEST(WriteOpTests, MultiOpOneOrTwoShardsOrdered) {
-
- //
- // Multi-op (ordered) targeting test where first two ops go to one shard, second two ops
- // go to two shards.
- // Should batch the first two ops, then second ops should be batched separately, then
- // last ops should be batched together
- //
-
- NamespaceString nss( "foo.bar" );
- ShardEndpoint endpointA( "shardA", ChunkVersion::IGNORED() );
- ShardEndpoint endpointB( "shardB", ChunkVersion::IGNORED() );
- MockNSTargeter targeter;
- initTargeterSplitRange( nss, endpointA, endpointB, &targeter );
-
- BatchedCommandRequest request( BatchedCommandRequest::BatchType_Delete );
- request.setNS( nss.ns() );
- request.setOrdered( true );
- // These go to the same shard
- request.getDeleteRequest()->addToDeletes( buildDelete( BSON( "x" << -1 ), 1 ) );
- request.getDeleteRequest()->addToDeletes( buildDelete( BSON( "x" << -2 ), 1 ) );
- // These go to both shards
- BSONObj queryA = BSON( "x" << GTE << -1 << LT << 2 );
- request.getDeleteRequest()->addToDeletes( buildDelete( queryA, 0 ) );
- BSONObj queryB = BSON( "x" << GTE << -2 << LT << 1 );
- request.getDeleteRequest()->addToDeletes( buildDelete( queryB, 0 ) );
- // These go to the same shard
- request.getDeleteRequest()->addToDeletes( buildDelete( BSON( "x" << 1 ), 1 ) );
- request.getDeleteRequest()->addToDeletes( buildDelete( BSON( "x" << 2 ), 1 ) );
-
- BatchWriteOp batchOp;
- batchOp.initClientRequest( &request );
- ASSERT( !batchOp.isFinished() );
-
- OwnedPointerVector<TargetedWriteBatch> targetedOwned;
- vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch( targeter, false, &targeted );
-
- ASSERT( status.isOK() );
- ASSERT( !batchOp.isFinished() );
- ASSERT_EQUALS( targeted.size(), 1u );
- ASSERT_EQUALS( targeted.front()->getWrites().size(), 2u );
- assertEndpointsEqual( targeted.front()->getEndpoint(), endpointA );
-
- BatchedCommandResponse response;
- // Emulate one-write-per-delete-per-host
- buildResponse( 2, &response );
-
- // Respond to first targeted batch containing the two single-host deletes
- batchOp.noteBatchResponse( *targeted.front(), response, NULL );
- ASSERT( !batchOp.isFinished() );
-
- targetedOwned.clear();
- status = batchOp.targetBatch( targeter, false, &targeted );
-
- ASSERT( status.isOK() );
- ASSERT( !batchOp.isFinished() );
- ASSERT_EQUALS( targeted.size(), 2u );
- sortByEndpoint( &targeted );
- ASSERT_EQUALS( targeted.front()->getWrites().size(), 1u );
- ASSERT_EQUALS( targeted.back()->getWrites().size(), 1u );
- assertEndpointsEqual( targeted.front()->getEndpoint(), endpointA );
- assertEndpointsEqual( targeted.back()->getEndpoint(), endpointB );
-
- // Emulate one-write-per-delete-per-host
- buildResponse( 1, &response );
-
- // Respond to two targeted batches for first multi-delete
- batchOp.noteBatchResponse( *targeted.front(), response, NULL );
- ASSERT( !batchOp.isFinished() );
- batchOp.noteBatchResponse( *targeted.back(), response, NULL );
- ASSERT( !batchOp.isFinished() );
-
- targetedOwned.clear();
- status = batchOp.targetBatch( targeter, false, &targeted );
-
- ASSERT( status.isOK() );
- ASSERT( !batchOp.isFinished() );
- ASSERT_EQUALS( targeted.size(), 2u );
- sortByEndpoint( &targeted );
- ASSERT_EQUALS( targeted.front()->getWrites().size(), 1u );
- ASSERT_EQUALS( targeted.back()->getWrites().size(), 1u );
- assertEndpointsEqual( targeted.front()->getEndpoint(), endpointA );
- assertEndpointsEqual( targeted.back()->getEndpoint(), endpointB );
-
- // Respond to two targeted batches for second multi-delete
- batchOp.noteBatchResponse( *targeted.front(), response, NULL );
- ASSERT( !batchOp.isFinished() );
- batchOp.noteBatchResponse( *targeted.back(), response, NULL );
- ASSERT( !batchOp.isFinished() );
-
- targetedOwned.clear();
- status = batchOp.targetBatch( targeter, false, &targeted );
-
- ASSERT( status.isOK() );
- ASSERT( !batchOp.isFinished() );
- ASSERT_EQUALS( targeted.size(), 1u );
- ASSERT_EQUALS( targeted.front()->getWrites().size(), 2u );
- assertEndpointsEqual( targeted.back()->getEndpoint(), endpointB );
-
- // Emulate one-write-per-delete-per-host
- buildResponse( 2, &response );
-
- // Respond to final targeted batch containing the last two single-host deletes
- batchOp.noteBatchResponse( *targeted.front(), response, NULL );
- ASSERT( batchOp.isFinished() );
-
- BatchedCommandResponse clientResponse;
- batchOp.buildClientResponse( &clientResponse );
- ASSERT( clientResponse.getOk() );
- ASSERT_EQUALS( clientResponse.getN(), 8 );
- }
+ // Second targeting round is ok, but should stop at first write
+ ASSERT(status.isOK());
+ ASSERT(!batchOp.isFinished());
+ ASSERT_EQUALS(targeted.size(), 1u);
+ ASSERT_EQUALS(targeted.front()->getWrites().size(), 1u);
- TEST(WriteOpTests, MultiOpOneOrTwoShardsUnordered) {
-
- //
- // Multi-op (unordered) targeting test where first two ops go to one shard, second two ops
- // go to two shards.
- // Should batch all the ops together into two batches of four ops for each shard
- //
-
- NamespaceString nss( "foo.bar" );
- ShardEndpoint endpointA( "shardA", ChunkVersion::IGNORED() );
- ShardEndpoint endpointB( "shardB", ChunkVersion::IGNORED() );
- MockNSTargeter targeter;
- initTargeterSplitRange( nss, endpointA, endpointB, &targeter );
-
- BatchedCommandRequest request( BatchedCommandRequest::BatchType_Update );
- request.setNS( nss.ns() );
- request.setOrdered( false );
- // These go to the same shard
- request.getUpdateRequest()->addToUpdates( buildUpdate( BSON( "x" << -1 ), false ) );
- request.getUpdateRequest()->addToUpdates( buildUpdate( BSON( "x" << -2 ), false ) );
- // These go to both shards
- BSONObj queryA = BSON( "x" << GTE << -1 << LT << 2 );
- request.getUpdateRequest()->addToUpdates( buildUpdate( queryA, true ) );
- BSONObj queryB = BSON( "x" << GTE << -2 << LT << 1 );
- request.getUpdateRequest()->addToUpdates( buildUpdate( queryB, true ) );
- // These go to the same shard
- request.getUpdateRequest()->addToUpdates( buildUpdate( BSON( "x" << 1 ), false ) );
- request.getUpdateRequest()->addToUpdates( buildUpdate( BSON( "x" << 2 ), false ) );
-
- BatchWriteOp batchOp;
- batchOp.initClientRequest( &request );
- ASSERT( !batchOp.isFinished() );
-
- OwnedPointerVector<TargetedWriteBatch> targetedOwned;
- vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch( targeter, false, &targeted );
-
- ASSERT( status.isOK() );
- ASSERT( !batchOp.isFinished() );
- ASSERT_EQUALS( targeted.size(), 2u );
- sortByEndpoint( &targeted );
- ASSERT_EQUALS( targeted.front()->getWrites().size(), 4u );
- ASSERT_EQUALS( targeted.back()->getWrites().size(), 4u );
- assertEndpointsEqual( targeted.front()->getEndpoint(), endpointA );
- assertEndpointsEqual( targeted.back()->getEndpoint(), endpointB );
-
- BatchedCommandResponse response;
- // Emulate one-write-per-delete-per-host
- buildResponse( 4, &response );
-
- // Respond to first targeted batch containing the two single-host deletes
- batchOp.noteBatchResponse( *targeted.front(), response, NULL );
- ASSERT( !batchOp.isFinished() );
- batchOp.noteBatchResponse( *targeted.back(), response, NULL );
- ASSERT( batchOp.isFinished() );
-
- BatchedCommandResponse clientResponse;
- batchOp.buildClientResponse( &clientResponse );
- ASSERT( clientResponse.getOk() );
- ASSERT_EQUALS( clientResponse.getN(), 8 );
- }
+ BatchedCommandResponse response;
+ buildResponse(1, &response);
- TEST(WriteOpTests, MultiOpSingleShardErrorUnordered) {
-
- //
- // Multi-op targeting test where two ops go to two separate shards and there's an error on
- // one op on one shard
- // There should be one set of two batches to each shard and an error reported
- //
-
- NamespaceString nss( "foo.bar" );
- ShardEndpoint endpointA( "shardA", ChunkVersion::IGNORED() );
- ShardEndpoint endpointB( "shardB", ChunkVersion::IGNORED() );
- MockNSTargeter targeter;
- initTargeterSplitRange( nss, endpointA, endpointB, &targeter );
-
- BatchedCommandRequest request( BatchedCommandRequest::BatchType_Insert );
- request.setNS( nss.ns() );
- request.setOrdered( false );
- request.getInsertRequest()->addToDocuments( BSON( "x" << -1 ) );
- request.getInsertRequest()->addToDocuments( BSON( "x" << 1 ) );
-
- BatchWriteOp batchOp;
- batchOp.initClientRequest( &request );
- ASSERT( !batchOp.isFinished() );
-
- OwnedPointerVector<TargetedWriteBatch> targetedOwned;
- vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch( targeter, false, &targeted );
-
- ASSERT( status.isOK() );
- ASSERT( !batchOp.isFinished() );
- ASSERT_EQUALS( targeted.size(), 2u );
- sortByEndpoint( &targeted );
- assertEndpointsEqual( targeted.front()->getEndpoint(), endpointA );
- assertEndpointsEqual( targeted.back()->getEndpoint(), endpointB );
- ASSERT_EQUALS( targeted.front()->getWrites().size(), 1u );
- ASSERT_EQUALS( targeted.back()->getWrites().size(), 1u );
-
- BatchedCommandResponse response;
- buildResponse( 1, &response );
-
- // No error on first shard
- batchOp.noteBatchResponse( *targeted.front(), response, NULL );
- ASSERT( !batchOp.isFinished() );
-
- buildResponse( 0, &response );
- addError( ErrorCodes::UnknownError, "mock error", 0, &response );
-
- // Error on second write on second shard
- batchOp.noteBatchResponse( *targeted.back(), response, NULL );
- ASSERT( batchOp.isFinished() );
-
- BatchedCommandResponse clientResponse;
- batchOp.buildClientResponse( &clientResponse );
- ASSERT( clientResponse.getOk() );
- ASSERT_EQUALS( clientResponse.getN(), 1 );
- ASSERT( clientResponse.isErrDetailsSet() );
- ASSERT_EQUALS( clientResponse.sizeErrDetails(), 1u );
- ASSERT_EQUALS( clientResponse.getErrDetailsAt( 0 )->getErrCode(),
- response.getErrDetailsAt(0)->getErrCode() );
- ASSERT_EQUALS( clientResponse.getErrDetailsAt( 0 )->getErrMessage(),
- response.getErrDetailsAt(0)->getErrMessage() );
- ASSERT_EQUALS( clientResponse.getErrDetailsAt( 0 )->getIndex(), 1 );
- }
+ // First response ok
+ batchOp.noteBatchResponse(*targeted.front(), response, NULL);
+ ASSERT(!batchOp.isFinished());
- TEST(WriteOpTests, MultiOpTwoShardErrorsUnordered) {
-
- //
- // Multi-op targeting test where two ops go to two separate shards and there's an error on
- // each op on each shard
- // There should be one set of two batches to each shard and and two errors reported
- //
-
- NamespaceString nss( "foo.bar" );
- ShardEndpoint endpointA( "shardA", ChunkVersion::IGNORED() );
- ShardEndpoint endpointB( "shardB", ChunkVersion::IGNORED() );
- MockNSTargeter targeter;
- initTargeterSplitRange( nss, endpointA, endpointB, &targeter );
-
- BatchedCommandRequest request( BatchedCommandRequest::BatchType_Insert );
- request.setNS( nss.ns() );
- request.setOrdered( false );
- request.getInsertRequest()->addToDocuments( BSON( "x" << -1 ) );
- request.getInsertRequest()->addToDocuments( BSON( "x" << 1 ) );
-
- BatchWriteOp batchOp;
- batchOp.initClientRequest( &request );
- ASSERT( !batchOp.isFinished() );
-
- OwnedPointerVector<TargetedWriteBatch> targetedOwned;
- vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch( targeter, false, &targeted );
-
- ASSERT( status.isOK() );
- ASSERT( !batchOp.isFinished() );
- ASSERT_EQUALS( targeted.size(), 2u );
- sortByEndpoint( &targeted );
- assertEndpointsEqual( targeted.front()->getEndpoint(), endpointA );
- assertEndpointsEqual( targeted.back()->getEndpoint(), endpointB );
- ASSERT_EQUALS( targeted.front()->getWrites().size(), 1u );
- ASSERT_EQUALS( targeted.back()->getWrites().size(), 1u );
-
- BatchedCommandResponse response;
- buildResponse( 0, &response );
- addError( ErrorCodes::UnknownError, "mock error", 0, &response );
-
- // Error on first write on first shard
- batchOp.noteBatchResponse( *targeted.front(), response, NULL );
- ASSERT( !batchOp.isFinished() );
-
- // Error on second write on second shard
- batchOp.noteBatchResponse( *targeted.back(), response, NULL );
- ASSERT( batchOp.isFinished() );
-
- BatchedCommandResponse clientResponse;
- batchOp.buildClientResponse( &clientResponse );
- ASSERT( clientResponse.getOk() );
- ASSERT_EQUALS( clientResponse.getN(), 0 );
- ASSERT( clientResponse.isErrDetailsSet() );
- ASSERT_EQUALS( clientResponse.sizeErrDetails(), 2u );
- ASSERT_EQUALS( clientResponse.getErrDetailsAt( 0 )->getErrCode(),
- response.getErrDetailsAt(0)->getErrCode() );
- ASSERT_EQUALS( clientResponse.getErrDetailsAt( 0 )->getErrMessage(),
- response.getErrDetailsAt(0)->getErrMessage() );
- ASSERT_EQUALS( clientResponse.getErrDetailsAt( 0 )->getIndex(), 0 );
- ASSERT_EQUALS( clientResponse.getErrDetailsAt( 1 )->getErrCode(),
- response.getErrDetailsAt(0)->getErrCode() );
- ASSERT_EQUALS( clientResponse.getErrDetailsAt( 1 )->getErrMessage(),
- response.getErrDetailsAt(0)->getErrMessage() );
- ASSERT_EQUALS( clientResponse.getErrDetailsAt( 1 )->getIndex(), 1 );
- }
+ targetedOwned.clear();
+ status = batchOp.targetBatch(targeter, true, &targeted);
- TEST(WriteOpTests, MultiOpPartialSingleShardErrorUnordered) {
-
- //
- // Multi-op targeting test where each op goes to both shards and there's an error on
- // one op on one shard
- // There should be one set of two batches to each shard and an error reported
- //
-
- NamespaceString nss( "foo.bar" );
- ShardEndpoint endpointA( "shardA", ChunkVersion::IGNORED() );
- ShardEndpoint endpointB( "shardB", ChunkVersion::IGNORED() );
- MockNSTargeter targeter;
- initTargeterSplitRange( nss, endpointA, endpointB, &targeter );
-
- BatchedCommandRequest request( BatchedCommandRequest::BatchType_Delete );
- request.setNS( nss.ns() );
- request.setOrdered( false );
- BSONObj queryA = BSON( "x" << GTE << -1 << LT << 2 );
- request.getDeleteRequest()->addToDeletes( buildDelete( queryA, 0 ) );
- BSONObj queryB = BSON( "x" << GTE << -2 << LT << 1 );
- request.getDeleteRequest()->addToDeletes( buildDelete( queryB, 0 ) );
-
- BatchWriteOp batchOp;
- batchOp.initClientRequest( &request );
- ASSERT( !batchOp.isFinished() );
-
- OwnedPointerVector<TargetedWriteBatch> targetedOwned;
- vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch( targeter, false, &targeted );
-
- ASSERT( status.isOK() );
- ASSERT( !batchOp.isFinished() );
- ASSERT_EQUALS( targeted.size(), 2u );
- sortByEndpoint( &targeted );
- assertEndpointsEqual( targeted.front()->getEndpoint(), endpointA );
- assertEndpointsEqual( targeted.back()->getEndpoint(), endpointB );
- ASSERT_EQUALS( targeted.front()->getWrites().size(), 2u );
- ASSERT_EQUALS( targeted.back()->getWrites().size(), 2u );
-
- BatchedCommandResponse response;
- buildResponse( 2, &response );
-
- // No errors on first shard
- batchOp.noteBatchResponse( *targeted.front(), response, NULL );
- ASSERT( !batchOp.isFinished() );
-
- buildResponse( 1, &response );
- addError( ErrorCodes::UnknownError, "mock error", 1, &response );
-
- // Error on second write on second shard
- batchOp.noteBatchResponse( *targeted.back(), response, NULL );
- ASSERT( batchOp.isFinished() );
-
- BatchedCommandResponse clientResponse;
- batchOp.buildClientResponse( &clientResponse );
- ASSERT( clientResponse.getOk() );
- ASSERT_EQUALS( clientResponse.getN(), 3 );
- ASSERT( clientResponse.isErrDetailsSet() );
- ASSERT_EQUALS( clientResponse.sizeErrDetails(), 1u );
- ASSERT_EQUALS( clientResponse.getErrDetailsAt( 0 )->getErrCode(),
- response.getErrDetailsAt(0)->getErrCode() );
- ASSERT_EQUALS( clientResponse.getErrDetailsAt( 0 )->getErrMessage(),
- response.getErrDetailsAt(0)->getErrMessage() );
- ASSERT_EQUALS( clientResponse.getErrDetailsAt( 0 )->getIndex(), 1 );
- }
+ // Second targeting round results in an error which finishes the batch
+ ASSERT(status.isOK());
+ ASSERT(batchOp.isFinished());
+ ASSERT_EQUALS(targeted.size(), 0u);
- TEST(WriteOpTests, MultiOpPartialSingleShardErrorOrdered) {
-
- //
- // Multi-op targeting test where each op goes to both shards and there's an error on
- // one op on one shard
- // There should be one set of two batches to each shard and an error reported, the second
- // op should not get run
- //
-
- NamespaceString nss( "foo.bar" );
- ShardEndpoint endpointA( "shardA", ChunkVersion::IGNORED() );
- ShardEndpoint endpointB( "shardB", ChunkVersion::IGNORED() );
- MockNSTargeter targeter;
- initTargeterSplitRange( nss, endpointA, endpointB, &targeter );
-
- BatchedCommandRequest request( BatchedCommandRequest::BatchType_Delete );
- request.setNS( nss.ns() );
- request.setOrdered( true );
- BSONObj queryA = BSON( "x" << GTE << -1 << LT << 2 );
- request.getDeleteRequest()->addToDeletes( buildDelete( queryA, 0 ) );
- BSONObj queryB = BSON( "x" << GTE << -2 << LT << 1 );
- request.getDeleteRequest()->addToDeletes( buildDelete( queryB, 0 ) );
-
- BatchWriteOp batchOp;
- batchOp.initClientRequest( &request );
- ASSERT( !batchOp.isFinished() );
-
- OwnedPointerVector<TargetedWriteBatch> targetedOwned;
- vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch( targeter, false, &targeted );
-
- ASSERT( status.isOK() );
- ASSERT( !batchOp.isFinished() );
- ASSERT_EQUALS( targeted.size(), 2u );
- sortByEndpoint( &targeted );
- assertEndpointsEqual( targeted.front()->getEndpoint(), endpointA );
- assertEndpointsEqual( targeted.back()->getEndpoint(), endpointB );
- ASSERT_EQUALS( targeted.front()->getWrites().size(), 1u );
- ASSERT_EQUALS( targeted.back()->getWrites().size(), 1u );
-
- BatchedCommandResponse response;
- buildResponse( 1, &response );
-
- // No errors on first shard
- batchOp.noteBatchResponse( *targeted.front(), response, NULL );
- ASSERT( !batchOp.isFinished() );
-
- buildResponse( 0, &response );
- addError( ErrorCodes::UnknownError, "mock error", 0, &response );
-
- // Error on second write on second shard
- batchOp.noteBatchResponse( *targeted.back(), response, NULL );
- ASSERT( batchOp.isFinished() );
-
- BatchedCommandResponse clientResponse;
- batchOp.buildClientResponse( &clientResponse );
- ASSERT( clientResponse.getOk() );
- ASSERT_EQUALS( clientResponse.getN(), 1 );
- ASSERT( clientResponse.isErrDetailsSet() );
- ASSERT_EQUALS( clientResponse.sizeErrDetails(), 1u );
- ASSERT_EQUALS( clientResponse.getErrDetailsAt( 0 )->getErrCode(),
- response.getErrDetailsAt(0)->getErrCode() );
- ASSERT_EQUALS( clientResponse.getErrDetailsAt( 0 )->getErrMessage(),
- response.getErrDetailsAt(0)->getErrMessage() );
- ASSERT_EQUALS( clientResponse.getErrDetailsAt( 0 )->getIndex(), 0 );
- }
+ BatchedCommandResponse clientResponse;
+ batchOp.buildClientResponse(&clientResponse);
+ ASSERT(clientResponse.getOk());
+ ASSERT_EQUALS(clientResponse.getN(), 1);
+ ASSERT(clientResponse.isErrDetailsSet());
+ ASSERT_EQUALS(clientResponse.sizeErrDetails(), 1u);
+ ASSERT_EQUALS(clientResponse.getErrDetailsAt(0)->getIndex(), 1);
+}
+TEST(WriteOpTests, MultiOpFailedTargetUnordered) {
//
- // Tests of edge-case functionality, lifecycle is assumed to be behaving normally
+ // Targeting failure on second op in batch op (unordered)
//
- TEST(WriteOpTests, MultiOpErrorAndWriteConcernErrorUnordered) {
-
- //
- // Multi-op (unordered) error and write concern error test
- // We never report the write concern error for single-doc batches, since the error means
- // there's no write concern applied.
- // Don't suppress the error if ordered : false
- //
-
- NamespaceString nss( "foo.bar" );
- ShardEndpoint endpoint( "shard", ChunkVersion::IGNORED() );
- MockNSTargeter targeter;
- initTargeterFullRange( nss, endpoint, &targeter );
-
- BatchedCommandRequest request( BatchedCommandRequest::BatchType_Insert );
- request.setNS( nss.ns() );
- request.setOrdered( false );
- request.getInsertRequest()->addToDocuments( BSON( "x" << 1 ) );
- request.getInsertRequest()->addToDocuments( BSON( "x" << 1 ) );
- request.setWriteConcern( BSON( "w" << 3 ) );
-
- BatchWriteOp batchOp;
- batchOp.initClientRequest( &request );
-
- OwnedPointerVector<TargetedWriteBatch> targetedOwned;
- vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch( targeter, false, &targeted );
-
- BatchedCommandResponse response;
- buildResponse( 1, &response );
- addError( ErrorCodes::UnknownError, "mock error", 1, &response );
- addWCError( &response );
-
- // First stale response comes back, we should retry
- batchOp.noteBatchResponse( *targeted.front(), response, NULL );
- ASSERT( batchOp.isFinished() );
-
- // Unordered reports write concern error
- BatchedCommandResponse clientResponse;
- batchOp.buildClientResponse( &clientResponse );
- ASSERT( clientResponse.getOk() );
- ASSERT_EQUALS( clientResponse.getN(), 1 );
- ASSERT_EQUALS( clientResponse.sizeErrDetails(), 1u );
- ASSERT( clientResponse.isWriteConcernErrorSet() );
- }
-
-
- TEST(WriteOpTests, SingleOpErrorAndWriteConcernErrorOrdered) {
-
- //
- // Single-op (ordered) error and write concern error test
- // Suppress the write concern error if ordered and we also have an error
- //
-
- NamespaceString nss( "foo.bar" );
- ShardEndpoint endpointA( "shardA", ChunkVersion::IGNORED() );
- ShardEndpoint endpointB( "shardB", ChunkVersion::IGNORED() );
- MockNSTargeter targeter;
- initTargeterSplitRange( nss, endpointA, endpointB, &targeter );
-
- BatchedCommandRequest request( BatchedCommandRequest::BatchType_Update );
- request.setNS( nss.ns() );
- request.setOrdered( true );
- BSONObj query = BSON( "x" << GTE << -1 << LT << 2 );
- request.getUpdateRequest()->addToUpdates( buildUpdate( query, true ) );
- request.setWriteConcern( BSON( "w" << 3 ) );
-
- BatchWriteOp batchOp;
- batchOp.initClientRequest( &request );
-
- OwnedPointerVector<TargetedWriteBatch> targetedOwned;
- vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch( targeter, false, &targeted );
-
- BatchedCommandResponse response;
- buildResponse( 1, &response );
- addWCError( &response );
-
- // First response comes back with write concern error
- batchOp.noteBatchResponse( *targeted.front(), response, NULL );
- ASSERT( !batchOp.isFinished() );
-
- buildResponse( 0, &response );
- addError( ErrorCodes::UnknownError, "mock error", 0, &response );
-
- // Second response comes back with write error
- batchOp.noteBatchResponse( *targeted.back(), response, NULL );
- ASSERT( batchOp.isFinished() );
-
- // Ordered doesn't report write concern error
- BatchedCommandResponse clientResponse;
- batchOp.buildClientResponse( &clientResponse );
- ASSERT( clientResponse.getOk() );
- ASSERT_EQUALS( clientResponse.getN(), 1 );
- ASSERT( clientResponse.isErrDetailsSet() );
- ASSERT_EQUALS( clientResponse.sizeErrDetails(), 1u );
- ASSERT( !clientResponse.isWriteConcernErrorSet() );
- }
-
- TEST(WriteOpTests, MultiOpFailedTargetOrdered) {
-
- //
- // Targeting failure on second op in batch op (ordered)
- //
-
- NamespaceString nss( "foo.bar" );
- ShardEndpoint endpoint( "shard", ChunkVersion::IGNORED() );
- MockNSTargeter targeter;
- initTargeterHalfRange( nss, endpoint, &targeter );
-
- BatchedCommandRequest request( BatchedCommandRequest::BatchType_Insert );
- request.setNS( nss.ns() );
- request.getInsertRequest()->addToDocuments( BSON( "x" << -1 ) );
- request.getInsertRequest()->addToDocuments( BSON( "x" << 2 ) );
- request.getInsertRequest()->addToDocuments( BSON( "x" << -2 ) );
-
- // Do single-target, multi-doc batch write op
-
- BatchWriteOp batchOp;
- batchOp.initClientRequest( &request );
-
- OwnedPointerVector<TargetedWriteBatch> targetedOwned;
- vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch( targeter, false, &targeted );
-
- // First targeting round fails since we may be stale
- ASSERT( !status.isOK() );
- ASSERT( !batchOp.isFinished() );
-
- targetedOwned.clear();
- status = batchOp.targetBatch( targeter, true, &targeted );
-
- // Second targeting round is ok, but should stop at first write
- ASSERT( status.isOK() );
- ASSERT( !batchOp.isFinished() );
- ASSERT_EQUALS( targeted.size(), 1u );
- ASSERT_EQUALS( targeted.front()->getWrites().size(), 1u );
-
- BatchedCommandResponse response;
- buildResponse( 1, &response );
+ NamespaceString nss("foo.bar");
+ ShardEndpoint endpoint("shard", ChunkVersion::IGNORED());
+ MockNSTargeter targeter;
+ initTargeterHalfRange(nss, endpoint, &targeter);
+
+ BatchedCommandRequest request(BatchedCommandRequest::BatchType_Insert);
+ request.setNS(nss.ns());
+ request.setOrdered(false);
+ request.getInsertRequest()->addToDocuments(BSON("x" << -1));
+ request.getInsertRequest()->addToDocuments(BSON("x" << 2));
+ request.getInsertRequest()->addToDocuments(BSON("x" << -2));
+
+ // Do single-target, multi-doc batch write op
+
+ BatchWriteOp batchOp;
+ batchOp.initClientRequest(&request);
+
+ OwnedPointerVector<TargetedWriteBatch> targetedOwned;
+ vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
+ Status status = batchOp.targetBatch(targeter, false, &targeted);
+
+ // First targeting round fails since we may be stale
+ ASSERT(!status.isOK());
+ ASSERT(!batchOp.isFinished());
+
+ targetedOwned.clear();
+ status = batchOp.targetBatch(targeter, true, &targeted);
+
+ // Second targeting round is ok, and should record an error
+ ASSERT(status.isOK());
+ ASSERT(!batchOp.isFinished());
+ ASSERT_EQUALS(targeted.size(), 1u);
+ ASSERT_EQUALS(targeted.front()->getWrites().size(), 2u);
+
+ BatchedCommandResponse response;
+ buildResponse(2, &response);
+
+ // Response is ok for first and third write
+ batchOp.noteBatchResponse(*targeted.front(), response, NULL);
+ ASSERT(batchOp.isFinished());
+
+ BatchedCommandResponse clientResponse;
+ batchOp.buildClientResponse(&clientResponse);
+ ASSERT(clientResponse.getOk());
+ ASSERT_EQUALS(clientResponse.getN(), 2);
+ ASSERT(clientResponse.isErrDetailsSet());
+ ASSERT_EQUALS(clientResponse.sizeErrDetails(), 1u);
+ ASSERT_EQUALS(clientResponse.getErrDetailsAt(0)->getIndex(), 1);
+}
+
+TEST(WriteOpTests, MultiOpFailedBatchOrdered) {
+ //
+ // Batch failure (ok : 0) reported in a multi-op batch (ordered)
+ // Expect this gets translated down into write errors for first affected write
+ //
- // First response ok
- batchOp.noteBatchResponse( *targeted.front(), response, NULL );
- ASSERT( !batchOp.isFinished() );
+ NamespaceString nss("foo.bar");
+ ShardEndpoint endpointA("shardA", ChunkVersion::IGNORED());
+ ShardEndpoint endpointB("shardB", ChunkVersion::IGNORED());
+ MockNSTargeter targeter;
+ initTargeterSplitRange(nss, endpointA, endpointB, &targeter);
+
+ BatchedCommandRequest request(BatchedCommandRequest::BatchType_Insert);
+ request.setNS(nss.ns());
+ request.getInsertRequest()->addToDocuments(BSON("x" << -1));
+ request.getInsertRequest()->addToDocuments(BSON("x" << 2));
+ request.getInsertRequest()->addToDocuments(BSON("x" << 3));
+
+ BatchWriteOp batchOp;
+ batchOp.initClientRequest(&request);
+
+ OwnedPointerVector<TargetedWriteBatch> targetedOwned;
+ vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
+ Status status = batchOp.targetBatch(targeter, false, &targeted);
+
+ BatchedCommandResponse response;
+ buildResponse(1, &response);
+
+ // First shard batch is ok
+ batchOp.noteBatchResponse(*targeted.front(), response, NULL);
+ ASSERT(!batchOp.isFinished());
+
+ targetedOwned.clear();
+ status = batchOp.targetBatch(targeter, true, &targeted);
+
+ buildErrResponse(ErrorCodes::UnknownError, "mock error", &response);
+
+ // Second shard batch fails
+ batchOp.noteBatchResponse(*targeted.front(), response, NULL);
+ ASSERT(batchOp.isFinished());
+
+ // We should have recorded an error for the second write
+ BatchedCommandResponse clientResponse;
+ batchOp.buildClientResponse(&clientResponse);
+ ASSERT(clientResponse.getOk());
+ ASSERT_EQUALS(clientResponse.getN(), 1);
+ ASSERT(clientResponse.isErrDetailsSet());
+ ASSERT_EQUALS(clientResponse.sizeErrDetails(), 1u);
+ ASSERT_EQUALS(clientResponse.getErrDetailsAt(0)->getIndex(), 1);
+ ASSERT_EQUALS(clientResponse.getErrDetailsAt(0)->getErrCode(), response.getErrCode());
+}
+
+TEST(WriteOpTests, MultiOpFailedBatchUnordered) {
+ //
+ // Batch failure (ok : 0) reported in a multi-op batch (unordered)
+ // Expect this gets translated down into write errors for all affected writes
+ //
- targetedOwned.clear();
- status = batchOp.targetBatch( targeter, true, &targeted );
+ NamespaceString nss("foo.bar");
+ ShardEndpoint endpointA("shardA", ChunkVersion::IGNORED());
+ ShardEndpoint endpointB("shardB", ChunkVersion::IGNORED());
+ MockNSTargeter targeter;
+ initTargeterSplitRange(nss, endpointA, endpointB, &targeter);
+
+ BatchedCommandRequest request(BatchedCommandRequest::BatchType_Insert);
+ request.setNS(nss.ns());
+ request.setOrdered(false);
+ request.getInsertRequest()->addToDocuments(BSON("x" << -1));
+ request.getInsertRequest()->addToDocuments(BSON("x" << 2));
+ request.getInsertRequest()->addToDocuments(BSON("x" << 3));
+
+ BatchWriteOp batchOp;
+ batchOp.initClientRequest(&request);
+
+ OwnedPointerVector<TargetedWriteBatch> targetedOwned;
+ vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
+ Status status = batchOp.targetBatch(targeter, false, &targeted);
+
+ BatchedCommandResponse response;
+ buildResponse(1, &response);
+
+ // First shard batch is ok
+ batchOp.noteBatchResponse(*targeted.front(), response, NULL);
+ ASSERT(!batchOp.isFinished());
+
+ buildErrResponse(ErrorCodes::UnknownError, "mock error", &response);
+
+ // Second shard batch fails
+ batchOp.noteBatchResponse(*targeted.back(), response, NULL);
+ ASSERT(batchOp.isFinished());
+
+ // We should have recorded an error for the second and third write
+ BatchedCommandResponse clientResponse;
+ batchOp.buildClientResponse(&clientResponse);
+ ASSERT(clientResponse.getOk());
+ ASSERT_EQUALS(clientResponse.getN(), 1);
+ ASSERT(clientResponse.isErrDetailsSet());
+ ASSERT_EQUALS(clientResponse.sizeErrDetails(), 2u);
+ ASSERT_EQUALS(clientResponse.getErrDetailsAt(0)->getIndex(), 1);
+ ASSERT_EQUALS(clientResponse.getErrDetailsAt(0)->getErrCode(), response.getErrCode());
+ ASSERT_EQUALS(clientResponse.getErrDetailsAt(1)->getIndex(), 2);
+ ASSERT_EQUALS(clientResponse.getErrDetailsAt(1)->getErrCode(), response.getErrCode());
+}
+
+TEST(WriteOpTests, MultiOpAbortOrdered) {
+ //
+ // Batch aborted (ordered)
+ // Expect this gets translated down into write error for first affected write
+ //
- // Second targeting round results in an error which finishes the batch
- ASSERT( status.isOK() );
- ASSERT( batchOp.isFinished() );
- ASSERT_EQUALS( targeted.size(), 0u );
+ NamespaceString nss("foo.bar");
+ ShardEndpoint endpointA("shardA", ChunkVersion::IGNORED());
+ ShardEndpoint endpointB("shardB", ChunkVersion::IGNORED());
+ MockNSTargeter targeter;
+ initTargeterSplitRange(nss, endpointA, endpointB, &targeter);
+
+ BatchedCommandRequest request(BatchedCommandRequest::BatchType_Insert);
+ request.setNS(nss.ns());
+ request.getInsertRequest()->addToDocuments(BSON("x" << -1));
+ request.getInsertRequest()->addToDocuments(BSON("x" << 2));
+ request.getInsertRequest()->addToDocuments(BSON("x" << 3));
+
+ BatchWriteOp batchOp;
+ batchOp.initClientRequest(&request);
+
+ OwnedPointerVector<TargetedWriteBatch> targetedOwned;
+ vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
+ Status status = batchOp.targetBatch(targeter, false, &targeted);
+
+ BatchedCommandResponse response;
+ buildResponse(1, &response);
+
+ // First shard batch is ok
+ batchOp.noteBatchResponse(*targeted.front(), response, NULL);
+ ASSERT(!batchOp.isFinished());
+
+ WriteErrorDetail abortError;
+ abortError.setErrCode(ErrorCodes::UnknownError);
+ abortError.setErrMessage("mock abort");
+ batchOp.abortBatch(abortError);
+ ASSERT(batchOp.isFinished());
+
+ // We should have recorded an error for the second write
+ BatchedCommandResponse clientResponse;
+ batchOp.buildClientResponse(&clientResponse);
+ ASSERT(clientResponse.getOk());
+ ASSERT_EQUALS(clientResponse.getN(), 1);
+ ASSERT(clientResponse.isErrDetailsSet());
+ ASSERT_EQUALS(clientResponse.sizeErrDetails(), 1u);
+ ASSERT_EQUALS(clientResponse.getErrDetailsAt(0)->getIndex(), 1);
+ ASSERT_EQUALS(clientResponse.getErrDetailsAt(0)->getErrCode(), abortError.getErrCode());
+}
+
+TEST(WriteOpTests, MultiOpAbortUnordered) {
+ //
+ // Batch aborted (unordered)
+ // Expect this gets translated down into write errors for all affected writes
+ //
- BatchedCommandResponse clientResponse;
- batchOp.buildClientResponse( &clientResponse );
- ASSERT( clientResponse.getOk() );
- ASSERT_EQUALS( clientResponse.getN(), 1 );
- ASSERT( clientResponse.isErrDetailsSet() );
- ASSERT_EQUALS( clientResponse.sizeErrDetails(), 1u );
- ASSERT_EQUALS( clientResponse.getErrDetailsAt(0)->getIndex(), 1 );
- }
+ NamespaceString nss("foo.bar");
+ ShardEndpoint endpointA("shardA", ChunkVersion::IGNORED());
+ ShardEndpoint endpointB("shardB", ChunkVersion::IGNORED());
+ MockNSTargeter targeter;
+ initTargeterSplitRange(nss, endpointA, endpointB, &targeter);
+
+ BatchedCommandRequest request(BatchedCommandRequest::BatchType_Insert);
+ request.setNS(nss.ns());
+ request.setOrdered(false);
+ request.getInsertRequest()->addToDocuments(BSON("x" << -1));
+ request.getInsertRequest()->addToDocuments(BSON("x" << -2));
+
+ BatchWriteOp batchOp;
+ batchOp.initClientRequest(&request);
+
+ WriteErrorDetail abortError;
+ abortError.setErrCode(ErrorCodes::UnknownError);
+ abortError.setErrMessage("mock abort");
+ batchOp.abortBatch(abortError);
+ ASSERT(batchOp.isFinished());
+
+ // We should have recorded an error for the first and second write
+ BatchedCommandResponse clientResponse;
+ batchOp.buildClientResponse(&clientResponse);
+ ASSERT(clientResponse.getOk());
+ ASSERT_EQUALS(clientResponse.getN(), 0);
+ ASSERT(clientResponse.isErrDetailsSet());
+ ASSERT_EQUALS(clientResponse.sizeErrDetails(), 2u);
+ ASSERT_EQUALS(clientResponse.getErrDetailsAt(0)->getIndex(), 0);
+ ASSERT_EQUALS(clientResponse.getErrDetailsAt(0)->getErrCode(), abortError.getErrCode());
+ ASSERT_EQUALS(clientResponse.getErrDetailsAt(1)->getIndex(), 1);
+ ASSERT_EQUALS(clientResponse.getErrDetailsAt(1)->getErrCode(), abortError.getErrCode());
+}
+
+TEST(WriteOpTests, MultiOpTwoWCErrors) {
+ //
+ // Multi-op targeting test where each op goes to both shards and both return a write concern
+ // error
+ //
- TEST(WriteOpTests, MultiOpFailedTargetUnordered) {
+ NamespaceString nss("foo.bar");
+ ShardEndpoint endpointA("shardA", ChunkVersion::IGNORED());
+ ShardEndpoint endpointB("shardB", ChunkVersion::IGNORED());
+ MockNSTargeter targeter;
+ initTargeterSplitRange(nss, endpointA, endpointB, &targeter);
+
+ BatchedCommandRequest request(BatchedCommandRequest::BatchType_Insert);
+ request.setNS(nss.ns());
+ request.getInsertRequest()->addToDocuments(BSON("x" << -1));
+ request.getInsertRequest()->addToDocuments(BSON("x" << 2));
+ request.setWriteConcern(BSON("w" << 3));
+
+ BatchWriteOp batchOp;
+ batchOp.initClientRequest(&request);
+
+ OwnedPointerVector<TargetedWriteBatch> targetedOwned;
+ vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
+ Status status = batchOp.targetBatch(targeter, false, &targeted);
+
+ BatchedCommandResponse response;
+ buildResponse(1, &response);
+ addWCError(&response);
+
+ // First shard write write concern fails.
+ batchOp.noteBatchResponse(*targeted.front(), response, NULL);
+ ASSERT(!batchOp.isFinished());
+
+ targetedOwned.clear();
+ status = batchOp.targetBatch(targeter, true, &targeted);
+
+ // Second shard write write concern fails.
+ batchOp.noteBatchResponse(*targeted.front(), response, NULL);
+ ASSERT(batchOp.isFinished());
+
+ BatchedCommandResponse clientResponse;
+ batchOp.buildClientResponse(&clientResponse);
+ ASSERT(clientResponse.getOk());
+ ASSERT_EQUALS(clientResponse.getN(), 2);
+ ASSERT(!clientResponse.isErrDetailsSet());
+ ASSERT(clientResponse.isWriteConcernErrorSet());
+}
+
+//
+// Tests of batch size limit functionality
+//
+
+TEST(WriteOpLimitTests, OneBigDoc) {
+ //
+ // Big single operation test - should go through
+ //
- //
- // Targeting failure on second op in batch op (unordered)
- //
+ NamespaceString nss("foo.bar");
+ ShardEndpoint endpoint("shard", ChunkVersion::IGNORED());
+ MockNSTargeter targeter;
+ initTargeterFullRange(nss, endpoint, &targeter);
- NamespaceString nss( "foo.bar" );
- ShardEndpoint endpoint( "shard", ChunkVersion::IGNORED() );
- MockNSTargeter targeter;
- initTargeterHalfRange( nss, endpoint, &targeter );
+ // Create a BSONObj (slightly) bigger than the maximum size by including a max-size string
+ string bigString(BSONObjMaxUserSize, 'x');
- BatchedCommandRequest request( BatchedCommandRequest::BatchType_Insert );
- request.setNS( nss.ns() );
- request.setOrdered( false );
- request.getInsertRequest()->addToDocuments( BSON( "x" << -1 ) );
- request.getInsertRequest()->addToDocuments( BSON( "x" << 2 ) );
- request.getInsertRequest()->addToDocuments( BSON( "x" << -2 ) );
+ // Do single-target, single doc batch write op
+ BatchedCommandRequest request(BatchedCommandRequest::BatchType_Insert);
+ request.setNS(nss.ns());
+ request.getInsertRequest()->addToDocuments(BSON("x" << 1 << "data" << bigString));
- // Do single-target, multi-doc batch write op
+ BatchWriteOp batchOp;
+ batchOp.initClientRequest(&request);
- BatchWriteOp batchOp;
- batchOp.initClientRequest( &request );
+ OwnedPointerVector<TargetedWriteBatch> targetedOwned;
+ vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
+ Status status = batchOp.targetBatch(targeter, false, &targeted);
+ ASSERT(status.isOK());
+ ASSERT_EQUALS(targeted.size(), 1u);
- OwnedPointerVector<TargetedWriteBatch> targetedOwned;
- vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch( targeter, false, &targeted );
+ BatchedCommandResponse response;
+ buildResponse(1, &response);
- // First targeting round fails since we may be stale
- ASSERT( !status.isOK() );
- ASSERT( !batchOp.isFinished() );
+ batchOp.noteBatchResponse(*targeted.front(), response, NULL);
+ ASSERT(batchOp.isFinished());
+}
- targetedOwned.clear();
- status = batchOp.targetBatch( targeter, true, &targeted );
+TEST(WriteOpLimitTests, OneBigOneSmall) {
+ //
+ // Big doc with smaller additional doc - should go through as two batches
+ //
- // Second targeting round is ok, and should record an error
- ASSERT( status.isOK() );
- ASSERT( !batchOp.isFinished() );
- ASSERT_EQUALS( targeted.size(), 1u );
- ASSERT_EQUALS( targeted.front()->getWrites().size(), 2u );
+ NamespaceString nss("foo.bar");
+ ShardEndpoint endpoint("shard", ChunkVersion::IGNORED());
+ MockNSTargeter targeter;
+ initTargeterFullRange(nss, endpoint, &targeter);
- BatchedCommandResponse response;
- buildResponse( 2, &response );
+ // Create a BSONObj (slightly) bigger than the maximum size by including a max-size string
+ string bigString(BSONObjMaxUserSize, 'x');
- // Response is ok for first and third write
- batchOp.noteBatchResponse( *targeted.front(), response, NULL );
- ASSERT( batchOp.isFinished() );
+ BatchedCommandRequest request(BatchedCommandRequest::BatchType_Update);
+ request.setNS(nss.ns());
+ BatchedUpdateDocument* bigUpdateDoc =
+ buildUpdate(BSON("x" << 1), BSON("data" << bigString), false);
+ request.getUpdateRequest()->addToUpdates(bigUpdateDoc);
+ request.getUpdateRequest()->addToUpdates(buildUpdate(BSON("x" << 2), BSONObj(), false));
- BatchedCommandResponse clientResponse;
- batchOp.buildClientResponse( &clientResponse );
- ASSERT( clientResponse.getOk() );
- ASSERT_EQUALS( clientResponse.getN(), 2 );
- ASSERT( clientResponse.isErrDetailsSet() );
- ASSERT_EQUALS( clientResponse.sizeErrDetails(), 1u );
- ASSERT_EQUALS( clientResponse.getErrDetailsAt(0)->getIndex(), 1 );
- }
+ BatchWriteOp batchOp;
+ batchOp.initClientRequest(&request);
- TEST(WriteOpTests, MultiOpFailedBatchOrdered) {
-
- //
- // Batch failure (ok : 0) reported in a multi-op batch (ordered)
- // Expect this gets translated down into write errors for first affected write
- //
-
- NamespaceString nss( "foo.bar" );
- ShardEndpoint endpointA( "shardA", ChunkVersion::IGNORED() );
- ShardEndpoint endpointB( "shardB", ChunkVersion::IGNORED() );
- MockNSTargeter targeter;
- initTargeterSplitRange( nss, endpointA, endpointB, &targeter );
-
- BatchedCommandRequest request( BatchedCommandRequest::BatchType_Insert );
- request.setNS( nss.ns() );
- request.getInsertRequest()->addToDocuments( BSON( "x" << -1 ) );
- request.getInsertRequest()->addToDocuments( BSON( "x" << 2 ) );
- request.getInsertRequest()->addToDocuments( BSON( "x" << 3 ) );
-
- BatchWriteOp batchOp;
- batchOp.initClientRequest( &request );
-
- OwnedPointerVector<TargetedWriteBatch> targetedOwned;
- vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch( targeter, false, &targeted );
-
- BatchedCommandResponse response;
- buildResponse( 1, &response );
-
- // First shard batch is ok
- batchOp.noteBatchResponse( *targeted.front(), response, NULL );
- ASSERT( !batchOp.isFinished() );
-
- targetedOwned.clear();
- status = batchOp.targetBatch( targeter, true, &targeted );
-
- buildErrResponse( ErrorCodes::UnknownError, "mock error", &response );
-
- // Second shard batch fails
- batchOp.noteBatchResponse( *targeted.front(), response, NULL );
- ASSERT( batchOp.isFinished() );
-
- // We should have recorded an error for the second write
- BatchedCommandResponse clientResponse;
- batchOp.buildClientResponse( &clientResponse );
- ASSERT( clientResponse.getOk() );
- ASSERT_EQUALS( clientResponse.getN(), 1 );
- ASSERT( clientResponse.isErrDetailsSet() );
- ASSERT_EQUALS( clientResponse.sizeErrDetails(), 1u );
- ASSERT_EQUALS( clientResponse.getErrDetailsAt(0)->getIndex(), 1 );
- ASSERT_EQUALS( clientResponse.getErrDetailsAt(0)->getErrCode(), response.getErrCode() );
- }
+ OwnedPointerVector<TargetedWriteBatch> targetedOwned;
+ vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
+ Status status = batchOp.targetBatch(targeter, false, &targeted);
+ ASSERT(status.isOK());
+ ASSERT_EQUALS(targeted.size(), 1u);
+ ASSERT_EQUALS(targeted.front()->getWrites().size(), 1u);
- TEST(WriteOpTests, MultiOpFailedBatchUnordered) {
-
- //
- // Batch failure (ok : 0) reported in a multi-op batch (unordered)
- // Expect this gets translated down into write errors for all affected writes
- //
-
- NamespaceString nss( "foo.bar" );
- ShardEndpoint endpointA( "shardA", ChunkVersion::IGNORED() );
- ShardEndpoint endpointB( "shardB", ChunkVersion::IGNORED() );
- MockNSTargeter targeter;
- initTargeterSplitRange( nss, endpointA, endpointB, &targeter );
-
- BatchedCommandRequest request( BatchedCommandRequest::BatchType_Insert );
- request.setNS( nss.ns() );
- request.setOrdered( false );
- request.getInsertRequest()->addToDocuments( BSON( "x" << -1 ) );
- request.getInsertRequest()->addToDocuments( BSON( "x" << 2 ) );
- request.getInsertRequest()->addToDocuments( BSON( "x" << 3 ) );
-
- BatchWriteOp batchOp;
- batchOp.initClientRequest( &request );
-
- OwnedPointerVector<TargetedWriteBatch> targetedOwned;
- vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch( targeter, false, &targeted );
-
- BatchedCommandResponse response;
- buildResponse( 1, &response );
-
- // First shard batch is ok
- batchOp.noteBatchResponse( *targeted.front(), response, NULL );
- ASSERT( !batchOp.isFinished() );
-
- buildErrResponse( ErrorCodes::UnknownError, "mock error", &response );
-
- // Second shard batch fails
- batchOp.noteBatchResponse( *targeted.back(), response, NULL );
- ASSERT( batchOp.isFinished() );
-
- // We should have recorded an error for the second and third write
- BatchedCommandResponse clientResponse;
- batchOp.buildClientResponse( &clientResponse );
- ASSERT( clientResponse.getOk() );
- ASSERT_EQUALS( clientResponse.getN(), 1 );
- ASSERT( clientResponse.isErrDetailsSet() );
- ASSERT_EQUALS( clientResponse.sizeErrDetails(), 2u );
- ASSERT_EQUALS( clientResponse.getErrDetailsAt(0)->getIndex(), 1 );
- ASSERT_EQUALS( clientResponse.getErrDetailsAt(0)->getErrCode(), response.getErrCode() );
- ASSERT_EQUALS( clientResponse.getErrDetailsAt(1)->getIndex(), 2 );
- ASSERT_EQUALS( clientResponse.getErrDetailsAt(1)->getErrCode(), response.getErrCode() );
- }
+ BatchedCommandResponse response;
+ buildResponse(1, &response);
- TEST(WriteOpTests, MultiOpAbortOrdered) {
-
- //
- // Batch aborted (ordered)
- // Expect this gets translated down into write error for first affected write
- //
-
- NamespaceString nss( "foo.bar" );
- ShardEndpoint endpointA( "shardA", ChunkVersion::IGNORED() );
- ShardEndpoint endpointB( "shardB", ChunkVersion::IGNORED() );
- MockNSTargeter targeter;
- initTargeterSplitRange( nss, endpointA, endpointB, &targeter );
-
- BatchedCommandRequest request( BatchedCommandRequest::BatchType_Insert );
- request.setNS( nss.ns() );
- request.getInsertRequest()->addToDocuments( BSON( "x" << -1 ) );
- request.getInsertRequest()->addToDocuments( BSON( "x" << 2 ) );
- request.getInsertRequest()->addToDocuments( BSON( "x" << 3 ) );
-
- BatchWriteOp batchOp;
- batchOp.initClientRequest( &request );
-
- OwnedPointerVector<TargetedWriteBatch> targetedOwned;
- vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch( targeter, false, &targeted );
-
- BatchedCommandResponse response;
- buildResponse( 1, &response );
-
- // First shard batch is ok
- batchOp.noteBatchResponse( *targeted.front(), response, NULL );
- ASSERT( !batchOp.isFinished() );
-
- WriteErrorDetail abortError;
- abortError.setErrCode( ErrorCodes::UnknownError );
- abortError.setErrMessage( "mock abort" );
- batchOp.abortBatch( abortError );
- ASSERT( batchOp.isFinished() );
-
- // We should have recorded an error for the second write
- BatchedCommandResponse clientResponse;
- batchOp.buildClientResponse( &clientResponse );
- ASSERT( clientResponse.getOk() );
- ASSERT_EQUALS( clientResponse.getN(), 1 );
- ASSERT( clientResponse.isErrDetailsSet() );
- ASSERT_EQUALS( clientResponse.sizeErrDetails(), 1u );
- ASSERT_EQUALS( clientResponse.getErrDetailsAt(0)->getIndex(), 1 );
- ASSERT_EQUALS( clientResponse.getErrDetailsAt(0)->getErrCode(), abortError.getErrCode() );
- }
+ batchOp.noteBatchResponse(*targeted.front(), response, NULL);
+ ASSERT(!batchOp.isFinished());
- TEST(WriteOpTests, MultiOpAbortUnordered) {
-
- //
- // Batch aborted (unordered)
- // Expect this gets translated down into write errors for all affected writes
- //
-
- NamespaceString nss( "foo.bar" );
- ShardEndpoint endpointA( "shardA", ChunkVersion::IGNORED() );
- ShardEndpoint endpointB( "shardB", ChunkVersion::IGNORED() );
- MockNSTargeter targeter;
- initTargeterSplitRange( nss, endpointA, endpointB, &targeter );
-
- BatchedCommandRequest request( BatchedCommandRequest::BatchType_Insert );
- request.setNS( nss.ns() );
- request.setOrdered( false );
- request.getInsertRequest()->addToDocuments( BSON( "x" << -1 ) );
- request.getInsertRequest()->addToDocuments( BSON( "x" << -2 ) );
-
- BatchWriteOp batchOp;
- batchOp.initClientRequest( &request );
-
- WriteErrorDetail abortError;
- abortError.setErrCode( ErrorCodes::UnknownError );
- abortError.setErrMessage( "mock abort" );
- batchOp.abortBatch( abortError );
- ASSERT( batchOp.isFinished() );
-
- // We should have recorded an error for the first and second write
- BatchedCommandResponse clientResponse;
- batchOp.buildClientResponse( &clientResponse );
- ASSERT( clientResponse.getOk() );
- ASSERT_EQUALS( clientResponse.getN(), 0 );
- ASSERT( clientResponse.isErrDetailsSet() );
- ASSERT_EQUALS( clientResponse.sizeErrDetails(), 2u );
- ASSERT_EQUALS( clientResponse.getErrDetailsAt(0)->getIndex(), 0 );
- ASSERT_EQUALS( clientResponse.getErrDetailsAt(0)->getErrCode(), abortError.getErrCode() );
- ASSERT_EQUALS( clientResponse.getErrDetailsAt(1)->getIndex(), 1 );
- ASSERT_EQUALS( clientResponse.getErrDetailsAt(1)->getErrCode(), abortError.getErrCode() );
- }
+ targetedOwned.clear();
+ status = batchOp.targetBatch(targeter, false, &targeted);
+ ASSERT(status.isOK());
+ ASSERT_EQUALS(targeted.size(), 1u);
+ ASSERT_EQUALS(targeted.front()->getWrites().size(), 1u);
- TEST(WriteOpTests, MultiOpTwoWCErrors) {
-
- //
- // Multi-op targeting test where each op goes to both shards and both return a write concern
- // error
- //
-
- NamespaceString nss( "foo.bar" );
- ShardEndpoint endpointA( "shardA", ChunkVersion::IGNORED() );
- ShardEndpoint endpointB( "shardB", ChunkVersion::IGNORED() );
- MockNSTargeter targeter;
- initTargeterSplitRange( nss, endpointA, endpointB, &targeter );
-
- BatchedCommandRequest request( BatchedCommandRequest::BatchType_Insert );
- request.setNS( nss.ns() );
- request.getInsertRequest()->addToDocuments( BSON( "x" << -1 ) );
- request.getInsertRequest()->addToDocuments( BSON( "x" << 2 ) );
- request.setWriteConcern( BSON( "w" << 3 ) );
-
- BatchWriteOp batchOp;
- batchOp.initClientRequest( &request );
-
- OwnedPointerVector<TargetedWriteBatch> targetedOwned;
- vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch( targeter, false, &targeted );
-
- BatchedCommandResponse response;
- buildResponse( 1, &response );
- addWCError( &response );
-
- // First shard write write concern fails.
- batchOp.noteBatchResponse( *targeted.front(), response, NULL );
- ASSERT( !batchOp.isFinished() );
-
- targetedOwned.clear();
- status = batchOp.targetBatch( targeter, true, &targeted );
-
- // Second shard write write concern fails.
- batchOp.noteBatchResponse( *targeted.front(), response, NULL );
- ASSERT( batchOp.isFinished() );
-
- BatchedCommandResponse clientResponse;
- batchOp.buildClientResponse( &clientResponse );
- ASSERT( clientResponse.getOk() );
- ASSERT_EQUALS( clientResponse.getN(), 2 );
- ASSERT( !clientResponse.isErrDetailsSet() );
- ASSERT( clientResponse.isWriteConcernErrorSet() );
- }
+ batchOp.noteBatchResponse(*targeted.front(), response, NULL);
+ ASSERT(batchOp.isFinished());
+}
+TEST(WriteOpLimitTests, TooManyOps) {
//
- // Tests of batch size limit functionality
+ // Batch of 1002 documents
//
- TEST(WriteOpLimitTests, OneBigDoc) {
+ NamespaceString nss("foo.bar");
+ ShardEndpoint endpoint("shard", ChunkVersion::IGNORED());
+ MockNSTargeter targeter;
+ initTargeterFullRange(nss, endpoint, &targeter);
- //
- // Big single operation test - should go through
- //
+ BatchedCommandRequest request(BatchedCommandRequest::BatchType_Delete);
+ request.setNS(nss.ns());
- NamespaceString nss("foo.bar");
- ShardEndpoint endpoint("shard", ChunkVersion::IGNORED());
- MockNSTargeter targeter;
- initTargeterFullRange(nss, endpoint, &targeter);
-
- // Create a BSONObj (slightly) bigger than the maximum size by including a max-size string
- string bigString(BSONObjMaxUserSize, 'x');
-
- // Do single-target, single doc batch write op
- BatchedCommandRequest request(BatchedCommandRequest::BatchType_Insert);
- request.setNS(nss.ns());
- request.getInsertRequest()->addToDocuments(BSON( "x" << 1 << "data" << bigString ));
-
- BatchWriteOp batchOp;
- batchOp.initClientRequest(&request);
-
- OwnedPointerVector<TargetedWriteBatch> targetedOwned;
- vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch(targeter, false, &targeted);
- ASSERT(status.isOK());
- ASSERT_EQUALS(targeted.size(), 1u);
-
- BatchedCommandResponse response;
- buildResponse(1, &response);
-
- batchOp.noteBatchResponse(*targeted.front(), response, NULL);
- ASSERT(batchOp.isFinished());
+ // Add 2 more than the maximum to the batch
+ for (size_t i = 0; i < BatchedCommandRequest::kMaxWriteBatchSize + 2u; ++i) {
+ request.getDeleteRequest()->addToDeletes(buildDelete(BSON("x" << 2), 0));
}
- TEST(WriteOpLimitTests, OneBigOneSmall) {
-
- //
- // Big doc with smaller additional doc - should go through as two batches
- //
-
- NamespaceString nss("foo.bar");
- ShardEndpoint endpoint("shard", ChunkVersion::IGNORED());
- MockNSTargeter targeter;
- initTargeterFullRange(nss, endpoint, &targeter);
-
- // Create a BSONObj (slightly) bigger than the maximum size by including a max-size string
- string bigString(BSONObjMaxUserSize, 'x');
-
- BatchedCommandRequest request(BatchedCommandRequest::BatchType_Update);
- request.setNS(nss.ns());
- BatchedUpdateDocument* bigUpdateDoc = buildUpdate(BSON( "x" << 1 ),
- BSON( "data" << bigString ),
- false);
- request.getUpdateRequest()->addToUpdates(bigUpdateDoc);
- request.getUpdateRequest()->addToUpdates(buildUpdate(BSON( "x" << 2 ),
- BSONObj(),
- false));
-
- BatchWriteOp batchOp;
- batchOp.initClientRequest(&request);
-
- OwnedPointerVector<TargetedWriteBatch> targetedOwned;
- vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch(targeter, false, &targeted);
- ASSERT(status.isOK());
- ASSERT_EQUALS(targeted.size(), 1u);
- ASSERT_EQUALS(targeted.front()->getWrites().size(), 1u);
-
- BatchedCommandResponse response;
- buildResponse(1, &response);
-
- batchOp.noteBatchResponse(*targeted.front(), response, NULL);
- ASSERT(!batchOp.isFinished());
-
- targetedOwned.clear();
- status = batchOp.targetBatch(targeter, false, &targeted);
- ASSERT(status.isOK());
- ASSERT_EQUALS(targeted.size(), 1u);
- ASSERT_EQUALS(targeted.front()->getWrites().size(), 1u);
-
- batchOp.noteBatchResponse(*targeted.front(), response, NULL);
- ASSERT(batchOp.isFinished());
- }
+ BatchWriteOp batchOp;
+ batchOp.initClientRequest(&request);
- TEST(WriteOpLimitTests, TooManyOps) {
+ OwnedPointerVector<TargetedWriteBatch> targetedOwned;
+ vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
+ Status status = batchOp.targetBatch(targeter, false, &targeted);
+ ASSERT(status.isOK());
+ ASSERT_EQUALS(targeted.size(), 1u);
+ ASSERT_EQUALS(targeted.front()->getWrites().size(), 1000u);
- //
- // Batch of 1002 documents
- //
+ BatchedCommandResponse response;
+ buildResponse(1, &response);
- NamespaceString nss("foo.bar");
- ShardEndpoint endpoint("shard", ChunkVersion::IGNORED());
- MockNSTargeter targeter;
- initTargeterFullRange(nss, endpoint, &targeter);
+ batchOp.noteBatchResponse(*targeted.front(), response, NULL);
+ ASSERT(!batchOp.isFinished());
- BatchedCommandRequest request(BatchedCommandRequest::BatchType_Delete);
- request.setNS(nss.ns());
+ targetedOwned.clear();
+ status = batchOp.targetBatch(targeter, false, &targeted);
+ ASSERT(status.isOK());
+ ASSERT_EQUALS(targeted.size(), 1u);
+ ASSERT_EQUALS(targeted.front()->getWrites().size(), 2u);
- // Add 2 more than the maximum to the batch
- for (size_t i = 0; i < BatchedCommandRequest::kMaxWriteBatchSize + 2u; ++i) {
- request.getDeleteRequest()->addToDeletes(buildDelete(BSON( "x" << 2 ), 0));
- }
+ batchOp.noteBatchResponse(*targeted.front(), response, NULL);
+ ASSERT(batchOp.isFinished());
+}
- BatchWriteOp batchOp;
- batchOp.initClientRequest(&request);
+TEST(WriteOpLimitTests, UpdateOverheadIncluded) {
+ //
+ // Tests that the overhead of the extra fields in an update x 1000 is included in our size
+ // calculation
+ //
- OwnedPointerVector<TargetedWriteBatch> targetedOwned;
- vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch(targeter, false, &targeted);
- ASSERT(status.isOK());
- ASSERT_EQUALS(targeted.size(), 1u);
- ASSERT_EQUALS(targeted.front()->getWrites().size(), 1000u);
+ NamespaceString nss("foo.bar");
+ ShardEndpoint endpoint("shard", ChunkVersion::IGNORED());
+ MockNSTargeter targeter;
+ initTargeterFullRange(nss, endpoint, &targeter);
- BatchedCommandResponse response;
- buildResponse(1, &response);
+ int updateDataBytes =
+ BSONObjMaxUserSize / static_cast<int>(BatchedCommandRequest::kMaxWriteBatchSize);
- batchOp.noteBatchResponse(*targeted.front(), response, NULL);
- ASSERT(!batchOp.isFinished());
+ string dataString(updateDataBytes -
+ BSON("x" << 1 << "data"
+ << "").objsize(),
+ 'x');
- targetedOwned.clear();
- status = batchOp.targetBatch(targeter, false, &targeted);
- ASSERT(status.isOK());
- ASSERT_EQUALS(targeted.size(), 1u);
- ASSERT_EQUALS(targeted.front()->getWrites().size(), 2u);
+ BatchedCommandRequest request(BatchedCommandRequest::BatchType_Update);
+ request.setNS(nss.ns());
- batchOp.noteBatchResponse(*targeted.front(), response, NULL);
- ASSERT(batchOp.isFinished());
+ // Add the maximum number of updates
+ int estSizeBytes = 0;
+ for (size_t i = 0; i < BatchedCommandRequest::kMaxWriteBatchSize; ++i) {
+ BatchedUpdateDocument* updateDoc = new BatchedUpdateDocument;
+ updateDoc->setQuery(BSON("x" << 1 << "data" << dataString));
+ updateDoc->setUpdateExpr(BSONObj());
+ updateDoc->setMulti(false);
+ updateDoc->setUpsert(false);
+ request.getUpdateRequest()->addToUpdates(updateDoc);
+ estSizeBytes += updateDoc->toBSON().objsize();
}
- TEST(WriteOpLimitTests, UpdateOverheadIncluded) {
-
- //
- // Tests that the overhead of the extra fields in an update x 1000 is included in our size
- // calculation
- //
-
- NamespaceString nss("foo.bar");
- ShardEndpoint endpoint("shard", ChunkVersion::IGNORED());
- MockNSTargeter targeter;
- initTargeterFullRange(nss, endpoint, &targeter);
-
- int updateDataBytes = BSONObjMaxUserSize
- / static_cast<int>(BatchedCommandRequest::kMaxWriteBatchSize);
-
- string dataString(updateDataBytes - BSON( "x" << 1 << "data" << "" ).objsize(), 'x');
+ ASSERT_GREATER_THAN(estSizeBytes, BSONObjMaxInternalSize);
- BatchedCommandRequest request(BatchedCommandRequest::BatchType_Update);
- request.setNS(nss.ns());
+ BatchWriteOp batchOp;
+ batchOp.initClientRequest(&request);
- // Add the maximum number of updates
- int estSizeBytes = 0;
- for (size_t i = 0; i < BatchedCommandRequest::kMaxWriteBatchSize; ++i) {
- BatchedUpdateDocument* updateDoc = new BatchedUpdateDocument;
- updateDoc->setQuery(BSON( "x" << 1 << "data" << dataString ));
- updateDoc->setUpdateExpr(BSONObj());
- updateDoc->setMulti(false);
- updateDoc->setUpsert(false);
- request.getUpdateRequest()->addToUpdates(updateDoc);
- estSizeBytes += updateDoc->toBSON().objsize();
- }
+ OwnedPointerVector<TargetedWriteBatch> targetedOwned;
+ vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
+ Status status = batchOp.targetBatch(targeter, false, &targeted);
+ ASSERT(status.isOK());
+ ASSERT_EQUALS(targeted.size(), 1u);
+ ASSERT_LESS_THAN(targeted.front()->getWrites().size(), 1000u);
- ASSERT_GREATER_THAN(estSizeBytes, BSONObjMaxInternalSize);
+ BatchedCommandRequest childRequest(BatchedCommandRequest::BatchType_Update);
+ batchOp.buildBatchRequest(*targeted.front(), &childRequest);
+ ASSERT_LESS_THAN(childRequest.toBSON().objsize(), BSONObjMaxInternalSize);
- BatchWriteOp batchOp;
- batchOp.initClientRequest(&request);
+ BatchedCommandResponse response;
+ buildResponse(1, &response);
- OwnedPointerVector<TargetedWriteBatch> targetedOwned;
- vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch(targeter, false, &targeted);
- ASSERT(status.isOK());
- ASSERT_EQUALS(targeted.size(), 1u);
- ASSERT_LESS_THAN(targeted.front()->getWrites().size(), 1000u);
+ batchOp.noteBatchResponse(*targeted.front(), response, NULL);
+ ASSERT(!batchOp.isFinished());
- BatchedCommandRequest childRequest(BatchedCommandRequest::BatchType_Update);
- batchOp.buildBatchRequest(*targeted.front(), &childRequest);
- ASSERT_LESS_THAN(childRequest.toBSON().objsize(), BSONObjMaxInternalSize);
+ targetedOwned.clear();
+ status = batchOp.targetBatch(targeter, false, &targeted);
+ ASSERT(status.isOK());
+ ASSERT_EQUALS(targeted.size(), 1u);
+ ASSERT_LESS_THAN(targeted.front()->getWrites().size(), 1000u);
- BatchedCommandResponse response;
- buildResponse(1, &response);
+ childRequest.clear();
+ batchOp.buildBatchRequest(*targeted.front(), &childRequest);
+ ASSERT_LESS_THAN(childRequest.toBSON().objsize(), BSONObjMaxInternalSize);
- batchOp.noteBatchResponse(*targeted.front(), response, NULL);
- ASSERT(!batchOp.isFinished());
-
- targetedOwned.clear();
- status = batchOp.targetBatch(targeter, false, &targeted);
- ASSERT(status.isOK());
- ASSERT_EQUALS(targeted.size(), 1u);
- ASSERT_LESS_THAN(targeted.front()->getWrites().size(), 1000u);
-
- childRequest.clear();
- batchOp.buildBatchRequest(*targeted.front(), &childRequest);
- ASSERT_LESS_THAN(childRequest.toBSON().objsize(), BSONObjMaxInternalSize);
-
- batchOp.noteBatchResponse(*targeted.front(), response, NULL);
- ASSERT(batchOp.isFinished());
- }
+ batchOp.noteBatchResponse(*targeted.front(), response, NULL);
+ ASSERT(batchOp.isFinished());
+}
-} // unnamed namespace
+} // unnamed namespace
diff --git a/src/mongo/s/write_ops/batched_command_request.cpp b/src/mongo/s/write_ops/batched_command_request.cpp
index b3f4d9911bb..99ef66e767d 100644
--- a/src/mongo/s/write_ops/batched_command_request.cpp
+++ b/src/mongo/s/write_ops/batched_command_request.cpp
@@ -33,368 +33,364 @@
namespace mongo {
- using std::unique_ptr;
- using std::string;
- using std::vector;
+using std::unique_ptr;
+using std::string;
+using std::vector;
- const size_t BatchedCommandRequest::kMaxWriteBatchSize = 1000;
+const size_t BatchedCommandRequest::kMaxWriteBatchSize = 1000;
- BatchedCommandRequest::BatchedCommandRequest( BatchType batchType ) :
- _batchType( batchType ) {
- switch ( getBatchType() ) {
+BatchedCommandRequest::BatchedCommandRequest(BatchType batchType) : _batchType(batchType) {
+ switch (getBatchType()) {
case BatchedCommandRequest::BatchType_Insert:
- _insertReq.reset( new BatchedInsertRequest );
+ _insertReq.reset(new BatchedInsertRequest);
return;
case BatchedCommandRequest::BatchType_Update:
- _updateReq.reset( new BatchedUpdateRequest );
+ _updateReq.reset(new BatchedUpdateRequest);
return;
default:
- dassert( getBatchType() == BatchedCommandRequest::BatchType_Delete );
- _deleteReq.reset( new BatchedDeleteRequest );
+ dassert(getBatchType() == BatchedCommandRequest::BatchType_Delete);
+ _deleteReq.reset(new BatchedDeleteRequest);
return;
- }
}
+}
// This macro just invokes a given method on one of the three types of ops with parameters
-#define INVOKE(M,...) \
-{\
- switch ( getBatchType() ) {\
- case BatchedCommandRequest::BatchType_Insert:\
- return _insertReq->M(__VA_ARGS__);\
- case BatchedCommandRequest::BatchType_Update:\
- return _updateReq->M(__VA_ARGS__);\
- default:\
- dassert( getBatchType() == BatchedCommandRequest::BatchType_Delete );\
- return _deleteReq->M(__VA_ARGS__);\
- }\
-}
-
- BatchedCommandRequest::BatchType BatchedCommandRequest::getBatchType() const {
- return _batchType;
- }
+#define INVOKE(M, ...) \
+ { \
+ switch (getBatchType()) { \
+ case BatchedCommandRequest::BatchType_Insert: \
+ return _insertReq->M(__VA_ARGS__); \
+ case BatchedCommandRequest::BatchType_Update: \
+ return _updateReq->M(__VA_ARGS__); \
+ default: \
+ dassert(getBatchType() == BatchedCommandRequest::BatchType_Delete); \
+ return _deleteReq->M(__VA_ARGS__); \
+ } \
+ }
+
+BatchedCommandRequest::BatchType BatchedCommandRequest::getBatchType() const {
+ return _batchType;
+}
- BatchedInsertRequest* BatchedCommandRequest::getInsertRequest() const {
- return _insertReq.get();
- }
+BatchedInsertRequest* BatchedCommandRequest::getInsertRequest() const {
+ return _insertReq.get();
+}
- BatchedUpdateRequest* BatchedCommandRequest::getUpdateRequest() const {
- return _updateReq.get();
- }
+BatchedUpdateRequest* BatchedCommandRequest::getUpdateRequest() const {
+ return _updateReq.get();
+}
- BatchedDeleteRequest* BatchedCommandRequest::getDeleteRequest() const {
- return _deleteReq.get();
- }
+BatchedDeleteRequest* BatchedCommandRequest::getDeleteRequest() const {
+ return _deleteReq.get();
+}
+
+bool BatchedCommandRequest::isInsertIndexRequest() const {
+ if (_batchType != BatchedCommandRequest::BatchType_Insert)
+ return false;
+ return getNSS().isSystemDotIndexes();
+}
- bool BatchedCommandRequest::isInsertIndexRequest() const {
- if ( _batchType != BatchedCommandRequest::BatchType_Insert ) return false;
- return getNSS().isSystemDotIndexes();
+static bool extractUniqueIndex(const BSONObj& indexDesc) {
+ return indexDesc["unique"].trueValue();
+}
+
+bool BatchedCommandRequest::isUniqueIndexRequest() const {
+ if (!isInsertIndexRequest())
+ return false;
+ return extractUniqueIndex(getInsertRequest()->getDocumentsAt(0));
+}
+
+bool BatchedCommandRequest::isValidIndexRequest(string* errMsg) const {
+ string dummy;
+ if (!errMsg)
+ errMsg = &dummy;
+ dassert(isInsertIndexRequest());
+
+ if (sizeWriteOps() != 1) {
+ *errMsg = "invalid batch request for index creation";
+ return false;
}
- static bool extractUniqueIndex( const BSONObj& indexDesc ) {
- return indexDesc["unique"].trueValue();
+ const NamespaceString& targetNSS = getTargetingNSS();
+ if (!targetNSS.isValid()) {
+ *errMsg = targetNSS.ns() + " is not a valid namespace to index";
+ return false;
}
- bool BatchedCommandRequest::isUniqueIndexRequest() const {
- if ( !isInsertIndexRequest() ) return false;
- return extractUniqueIndex( getInsertRequest()->getDocumentsAt( 0 ) );
+ const NamespaceString& reqNSS = getNSS();
+ if (reqNSS.db().compare(targetNSS.db()) != 0) {
+ *errMsg =
+ targetNSS.ns() + " namespace is not in the request database " + reqNSS.db().toString();
+ return false;
}
- bool BatchedCommandRequest::isValidIndexRequest( string* errMsg ) const {
+ return true;
+}
- string dummy;
- if ( !errMsg )
- errMsg = &dummy;
- dassert( isInsertIndexRequest() );
+string BatchedCommandRequest::getTargetingNS() const {
+ return getTargetingNSS().toString();
+}
- if ( sizeWriteOps() != 1 ) {
- *errMsg = "invalid batch request for index creation";
- return false;
- }
+const NamespaceString& BatchedCommandRequest::getTargetingNSS() const {
+ if (!isInsertIndexRequest())
+ return getNSS();
+ INVOKE(getTargetingNSS);
+}
- const NamespaceString& targetNSS = getTargetingNSS();
- if ( !targetNSS.isValid() ) {
- *errMsg = targetNSS.ns() + " is not a valid namespace to index";
- return false;
- }
+static BSONObj extractIndexKeyPattern(const BSONObj& indexDesc) {
+ return indexDesc["key"].Obj();
+}
- const NamespaceString& reqNSS = getNSS();
- if ( reqNSS.db().compare( targetNSS.db() ) != 0 ) {
- *errMsg = targetNSS.ns() + " namespace is not in the request database "
- + reqNSS.db().toString();
- return false;
- }
+BSONObj BatchedCommandRequest::getIndexKeyPattern() const {
+ dassert(isInsertIndexRequest());
+ return extractIndexKeyPattern(getInsertRequest()->getDocumentsAt(0));
+}
+bool BatchedCommandRequest::isVerboseWC() const {
+ if (!isWriteConcernSet()) {
return true;
}
- string BatchedCommandRequest::getTargetingNS() const {
- return getTargetingNSS().toString();
- }
-
- const NamespaceString& BatchedCommandRequest::getTargetingNSS() const {
- if ( !isInsertIndexRequest() ) return getNSS();
- INVOKE(getTargetingNSS);
- }
-
- static BSONObj extractIndexKeyPattern( const BSONObj& indexDesc ) {
- return indexDesc["key"].Obj();
- }
-
- BSONObj BatchedCommandRequest::getIndexKeyPattern() const {
- dassert( isInsertIndexRequest() );
- return extractIndexKeyPattern( getInsertRequest()->getDocumentsAt( 0 ) );
+ BSONObj writeConcern = getWriteConcern();
+ BSONElement wElem = writeConcern["w"];
+ if (!wElem.isNumber() || wElem.Number() != 0) {
+ return true;
}
- bool BatchedCommandRequest::isVerboseWC() const {
- if ( !isWriteConcernSet() ) {
- return true;
- }
-
- BSONObj writeConcern = getWriteConcern();
- BSONElement wElem = writeConcern["w"];
- if ( !wElem.isNumber() || wElem.Number() != 0 ) {
- return true;
- }
-
- return false;
- }
+ return false;
+}
- void BatchedCommandRequest::cloneTo( BatchedCommandRequest* other ) const {
- other->_insertReq.reset();
- other->_updateReq.reset();
- other->_deleteReq.reset();
- other->_batchType = _batchType;
+void BatchedCommandRequest::cloneTo(BatchedCommandRequest* other) const {
+ other->_insertReq.reset();
+ other->_updateReq.reset();
+ other->_deleteReq.reset();
+ other->_batchType = _batchType;
- switch ( getBatchType() ) {
+ switch (getBatchType()) {
case BatchedCommandRequest::BatchType_Insert:
- other->_insertReq.reset( new BatchedInsertRequest );
- _insertReq->cloneTo( other->_insertReq.get() );
+ other->_insertReq.reset(new BatchedInsertRequest);
+ _insertReq->cloneTo(other->_insertReq.get());
return;
case BatchedCommandRequest::BatchType_Update:
- other->_updateReq.reset( new BatchedUpdateRequest );
- _updateReq->cloneTo( other->_updateReq.get() );
+ other->_updateReq.reset(new BatchedUpdateRequest);
+ _updateReq->cloneTo(other->_updateReq.get());
return;
default:
- dassert( getBatchType() == BatchedCommandRequest::BatchType_Delete );
- other->_deleteReq.reset( new BatchedDeleteRequest );
- _deleteReq->cloneTo( other->_deleteReq.get() );
+ dassert(getBatchType() == BatchedCommandRequest::BatchType_Delete);
+ other->_deleteReq.reset(new BatchedDeleteRequest);
+ _deleteReq->cloneTo(other->_deleteReq.get());
return;
- }
}
+}
- bool BatchedCommandRequest::isValid( std::string* errMsg ) const {
- INVOKE( isValid, errMsg );
- }
+bool BatchedCommandRequest::isValid(std::string* errMsg) const {
+ INVOKE(isValid, errMsg);
+}
- BSONObj BatchedCommandRequest::toBSON() const {
- INVOKE( toBSON );
- }
+BSONObj BatchedCommandRequest::toBSON() const {
+ INVOKE(toBSON);
+}
- bool BatchedCommandRequest::parseBSON( const BSONObj& source, std::string* errMsg ) {
- INVOKE( parseBSON, source, errMsg );
- }
+bool BatchedCommandRequest::parseBSON(const BSONObj& source, std::string* errMsg) {
+ INVOKE(parseBSON, source, errMsg);
+}
- void BatchedCommandRequest::clear() {
- INVOKE( clear );
- }
+void BatchedCommandRequest::clear() {
+ INVOKE(clear);
+}
- std::string BatchedCommandRequest::toString() const {
- INVOKE( toString );
- }
+std::string BatchedCommandRequest::toString() const {
+ INVOKE(toString);
+}
- void BatchedCommandRequest::setNSS( const NamespaceString& nss ) {
- INVOKE( setCollNameNS, nss );
- }
+void BatchedCommandRequest::setNSS(const NamespaceString& nss) {
+ INVOKE(setCollNameNS, nss);
+}
- void BatchedCommandRequest::setNS( StringData collName ) {
- INVOKE( setCollName, collName );
- }
+void BatchedCommandRequest::setNS(StringData collName) {
+ INVOKE(setCollName, collName);
+}
- const std::string& BatchedCommandRequest::getNS() const {
- INVOKE( getCollName );
- }
+const std::string& BatchedCommandRequest::getNS() const {
+ INVOKE(getCollName);
+}
- const NamespaceString& BatchedCommandRequest::getNSS() const {
- INVOKE(getCollNameNS);
- }
+const NamespaceString& BatchedCommandRequest::getNSS() const {
+ INVOKE(getCollNameNS);
+}
- std::size_t BatchedCommandRequest::sizeWriteOps() const {
- switch ( getBatchType() ) {
+std::size_t BatchedCommandRequest::sizeWriteOps() const {
+ switch (getBatchType()) {
case BatchedCommandRequest::BatchType_Insert:
return _insertReq->sizeDocuments();
case BatchedCommandRequest::BatchType_Update:
return _updateReq->sizeUpdates();
default:
return _deleteReq->sizeDeletes();
- }
}
+}
- void BatchedCommandRequest::setWriteConcern( const BSONObj& writeConcern ) {
- INVOKE( setWriteConcern, writeConcern );
- }
+void BatchedCommandRequest::setWriteConcern(const BSONObj& writeConcern) {
+ INVOKE(setWriteConcern, writeConcern);
+}
- void BatchedCommandRequest::unsetWriteConcern() {
- INVOKE( unsetWriteConcern );
- }
+void BatchedCommandRequest::unsetWriteConcern() {
+ INVOKE(unsetWriteConcern);
+}
- bool BatchedCommandRequest::isWriteConcernSet() const {
- INVOKE( isWriteConcernSet );
- }
+bool BatchedCommandRequest::isWriteConcernSet() const {
+ INVOKE(isWriteConcernSet);
+}
- const BSONObj& BatchedCommandRequest::getWriteConcern() const {
- INVOKE( getWriteConcern );
- }
+const BSONObj& BatchedCommandRequest::getWriteConcern() const {
+ INVOKE(getWriteConcern);
+}
- void BatchedCommandRequest::setOrdered( bool continueOnError ) {
- INVOKE( setOrdered, continueOnError );
- }
+void BatchedCommandRequest::setOrdered(bool continueOnError) {
+ INVOKE(setOrdered, continueOnError);
+}
- void BatchedCommandRequest::unsetOrdered() {
- INVOKE( unsetOrdered );
- }
+void BatchedCommandRequest::unsetOrdered() {
+ INVOKE(unsetOrdered);
+}
- bool BatchedCommandRequest::isOrderedSet() const {
- INVOKE( isOrderedSet );
- }
+bool BatchedCommandRequest::isOrderedSet() const {
+ INVOKE(isOrderedSet);
+}
- bool BatchedCommandRequest::getOrdered() const {
- INVOKE( getOrdered );
- }
+bool BatchedCommandRequest::getOrdered() const {
+ INVOKE(getOrdered);
+}
- void BatchedCommandRequest::setMetadata(BatchedRequestMetadata* metadata) {
- INVOKE( setMetadata, metadata );
- }
+void BatchedCommandRequest::setMetadata(BatchedRequestMetadata* metadata) {
+ INVOKE(setMetadata, metadata);
+}
- void BatchedCommandRequest::unsetMetadata() {
- INVOKE( unsetMetadata );
- }
+void BatchedCommandRequest::unsetMetadata() {
+ INVOKE(unsetMetadata);
+}
- bool BatchedCommandRequest::isMetadataSet() const {
- INVOKE( isMetadataSet );
- }
+bool BatchedCommandRequest::isMetadataSet() const {
+ INVOKE(isMetadataSet);
+}
- BatchedRequestMetadata* BatchedCommandRequest::getMetadata() const {
- INVOKE( getMetadata );
- }
+BatchedRequestMetadata* BatchedCommandRequest::getMetadata() const {
+ INVOKE(getMetadata);
+}
- void BatchedCommandRequest::setShouldBypassValidation(bool newVal) {
- INVOKE(setShouldBypassValidation, newVal);
- }
+void BatchedCommandRequest::setShouldBypassValidation(bool newVal) {
+ INVOKE(setShouldBypassValidation, newVal);
+}
- bool BatchedCommandRequest::shouldBypassValidation() const {
- INVOKE(shouldBypassValidation);
- }
+bool BatchedCommandRequest::shouldBypassValidation() const {
+ INVOKE(shouldBypassValidation);
+}
- /**
- * Generates a new request with insert _ids if required. Otherwise returns NULL.
- */
- BatchedCommandRequest* //
+/**
+ * Generates a new request with insert _ids if required. Otherwise returns NULL.
+ */
+BatchedCommandRequest* //
BatchedCommandRequest::cloneWithIds(const BatchedCommandRequest& origCmdRequest) {
+ if (origCmdRequest.getBatchType() != BatchedCommandRequest::BatchType_Insert ||
+ origCmdRequest.isInsertIndexRequest())
+ return NULL;
- if (origCmdRequest.getBatchType() != BatchedCommandRequest::BatchType_Insert
- || origCmdRequest.isInsertIndexRequest())
- return NULL;
-
- unique_ptr<BatchedInsertRequest> idRequest;
- BatchedInsertRequest* origRequest = origCmdRequest.getInsertRequest();
+ unique_ptr<BatchedInsertRequest> idRequest;
+ BatchedInsertRequest* origRequest = origCmdRequest.getInsertRequest();
- const vector<BSONObj>& inserts = origRequest->getDocuments();
+ const vector<BSONObj>& inserts = origRequest->getDocuments();
- size_t i = 0u;
- for (vector<BSONObj>::const_iterator it = inserts.begin(); it != inserts.end(); ++it, ++i) {
+ size_t i = 0u;
+ for (vector<BSONObj>::const_iterator it = inserts.begin(); it != inserts.end(); ++it, ++i) {
+ const BSONObj& insert = *it;
+ BSONObj idInsert;
- const BSONObj& insert = *it;
- BSONObj idInsert;
-
- if (insert["_id"].eoo()) {
- BSONObjBuilder idInsertB;
- idInsertB.append("_id", OID::gen());
- idInsertB.appendElements(insert);
- idInsert = idInsertB.obj();
- }
-
- if (NULL == idRequest.get() && !idInsert.isEmpty()) {
- idRequest.reset(new BatchedInsertRequest);
- origRequest->cloneTo(idRequest.get());
- }
-
- if (!idInsert.isEmpty()) {
- idRequest->setDocumentAt(i, idInsert);
- }
+ if (insert["_id"].eoo()) {
+ BSONObjBuilder idInsertB;
+ idInsertB.append("_id", OID::gen());
+ idInsertB.appendElements(insert);
+ idInsert = idInsertB.obj();
}
- if (NULL == idRequest.get())
- return NULL;
+ if (NULL == idRequest.get() && !idInsert.isEmpty()) {
+ idRequest.reset(new BatchedInsertRequest);
+ origRequest->cloneTo(idRequest.get());
+ }
- // Command request owns idRequest
- return new BatchedCommandRequest(idRequest.release());
+ if (!idInsert.isEmpty()) {
+ idRequest->setDocumentAt(i, idInsert);
+ }
}
- bool BatchedCommandRequest::containsNoIDUpsert(const BatchedCommandRequest& request) {
+ if (NULL == idRequest.get())
+ return NULL;
- if (request.getBatchType() != BatchedCommandRequest::BatchType_Update)
- return false;
-
- const vector<BatchedUpdateDocument*>& updates =
- request.getUpdateRequest()->getUpdates();
-
- for (vector<BatchedUpdateDocument*>::const_iterator it = updates.begin();
- it != updates.end(); ++it) {
-
- const BatchedUpdateDocument* updateDoc = *it;
- if (updateDoc->getUpsert() && updateDoc->getQuery()["_id"].eoo())
- return true;
- }
+ // Command request owns idRequest
+ return new BatchedCommandRequest(idRequest.release());
+}
+bool BatchedCommandRequest::containsNoIDUpsert(const BatchedCommandRequest& request) {
+ if (request.getBatchType() != BatchedCommandRequest::BatchType_Update)
return false;
- }
- bool BatchedCommandRequest::containsUpserts( const BSONObj& writeCmdObj ) {
+ const vector<BatchedUpdateDocument*>& updates = request.getUpdateRequest()->getUpdates();
- BSONElement updatesEl = writeCmdObj[BatchedUpdateRequest::updates()];
- if ( updatesEl.type() != Array ) {
- return false;
- }
+ for (vector<BatchedUpdateDocument*>::const_iterator it = updates.begin(); it != updates.end();
+ ++it) {
+ const BatchedUpdateDocument* updateDoc = *it;
+ if (updateDoc->getUpsert() && updateDoc->getQuery()["_id"].eoo())
+ return true;
+ }
- BSONObjIterator it( updatesEl.Obj() );
- while ( it.more() ) {
- BSONElement updateEl = it.next();
- if ( !updateEl.isABSONObj() ) continue;
- if ( updateEl.Obj()[BatchedUpdateDocument::upsert()].trueValue() ) return true;
- }
+ return false;
+}
+bool BatchedCommandRequest::containsUpserts(const BSONObj& writeCmdObj) {
+ BSONElement updatesEl = writeCmdObj[BatchedUpdateRequest::updates()];
+ if (updatesEl.type() != Array) {
return false;
}
- bool BatchedCommandRequest::getIndexedNS( const BSONObj& writeCmdObj,
- string* nsToIndex,
- string* errMsg ) {
+ BSONObjIterator it(updatesEl.Obj());
+ while (it.more()) {
+ BSONElement updateEl = it.next();
+ if (!updateEl.isABSONObj())
+ continue;
+ if (updateEl.Obj()[BatchedUpdateDocument::upsert()].trueValue())
+ return true;
+ }
- BSONElement documentsEl = writeCmdObj[BatchedInsertRequest::documents()];
- if ( documentsEl.type() != Array ) {
- *errMsg = "index write batch is invalid";
- return false;
- }
+ return false;
+}
- BSONObjIterator it( documentsEl.Obj() );
- if ( !it.more() ) {
- *errMsg = "index write batch is empty";
- return false;
- }
+bool BatchedCommandRequest::getIndexedNS(const BSONObj& writeCmdObj,
+ string* nsToIndex,
+ string* errMsg) {
+ BSONElement documentsEl = writeCmdObj[BatchedInsertRequest::documents()];
+ if (documentsEl.type() != Array) {
+ *errMsg = "index write batch is invalid";
+ return false;
+ }
- BSONElement indexDescEl = it.next();
- *nsToIndex = indexDescEl["ns"].str();
- if ( *nsToIndex == "" ) {
- *errMsg = "index write batch contains an invalid index descriptor";
- return false;
- }
+ BSONObjIterator it(documentsEl.Obj());
+ if (!it.more()) {
+ *errMsg = "index write batch is empty";
+ return false;
+ }
- if ( it.more() ) {
- *errMsg = "index write batches may only contain a single index descriptor";
- return false;
- }
+ BSONElement indexDescEl = it.next();
+ *nsToIndex = indexDescEl["ns"].str();
+ if (*nsToIndex == "") {
+ *errMsg = "index write batch contains an invalid index descriptor";
+ return false;
+ }
- return true;
+ if (it.more()) {
+ *errMsg = "index write batches may only contain a single index descriptor";
+ return false;
}
-} // namespace mongo
+ return true;
+}
+
+} // namespace mongo
diff --git a/src/mongo/s/write_ops/batched_command_request.h b/src/mongo/s/write_ops/batched_command_request.h
index 66e178dd5a8..b8e6e2f8e80 100644
--- a/src/mongo/s/write_ops/batched_command_request.h
+++ b/src/mongo/s/write_ops/batched_command_request.h
@@ -37,213 +37,204 @@
namespace mongo {
- class NamespaceString;
+class NamespaceString;
- /**
- * This class wraps the different kinds of command requests into a generically usable write
- * command request.
- *
- * Designed to be a very thin wrapper that mimics the underlying requests exactly. Owns the
- * wrapped request object once constructed.
- */
- class BatchedCommandRequest : public BSONSerializable {
- MONGO_DISALLOW_COPYING(BatchedCommandRequest);
- public:
-
- // Maximum number of write ops supported per batch
- static const size_t kMaxWriteBatchSize;
-
- enum BatchType {
- BatchType_Insert, BatchType_Update, BatchType_Delete, BatchType_Unknown
- };
+/**
+ * This class wraps the different kinds of command requests into a generically usable write
+ * command request.
+ *
+ * Designed to be a very thin wrapper that mimics the underlying requests exactly. Owns the
+ * wrapped request object once constructed.
+ */
+class BatchedCommandRequest : public BSONSerializable {
+ MONGO_DISALLOW_COPYING(BatchedCommandRequest);
- //
- // construction / destruction
- //
+public:
+ // Maximum number of write ops supported per batch
+ static const size_t kMaxWriteBatchSize;
- BatchedCommandRequest( BatchType batchType );
+ enum BatchType { BatchType_Insert, BatchType_Update, BatchType_Delete, BatchType_Unknown };
- /**
- * insertReq ownership is transferred to here.
- */
- BatchedCommandRequest( BatchedInsertRequest* insertReq ) :
- _batchType( BatchType_Insert ), _insertReq( insertReq ) {
- }
+ //
+ // construction / destruction
+ //
- /**
- * updateReq ownership is transferred to here.
- */
- BatchedCommandRequest( BatchedUpdateRequest* updateReq ) :
- _batchType( BatchType_Update ), _updateReq( updateReq ) {
- }
+ BatchedCommandRequest(BatchType batchType);
- /**
- * deleteReq ownership is transferred to here.
- */
- BatchedCommandRequest( BatchedDeleteRequest* deleteReq ) :
- _batchType( BatchType_Delete ), _deleteReq( deleteReq ) {
- }
-
- virtual ~BatchedCommandRequest() {};
-
- /** Copies all the fields present in 'this' to 'other'. */
- void cloneTo( BatchedCommandRequest* other ) const;
-
- //
- // bson serializable interface implementation
- //
-
- virtual bool isValid( std::string* errMsg ) const;
- virtual BSONObj toBSON() const;
- virtual bool parseBSON( const BSONObj& source, std::string* errMsg );
- virtual void clear();
- virtual std::string toString() const;
-
- //
- // Batch type accessors
- //
-
- BatchType getBatchType() const;
- BatchedInsertRequest* getInsertRequest() const;
- BatchedUpdateRequest* getUpdateRequest() const;
- BatchedDeleteRequest* getDeleteRequest() const;
- // Index creation is also an insert, but a weird one.
- bool isInsertIndexRequest() const;
- bool isUniqueIndexRequest() const;
- bool isValidIndexRequest( std::string* errMsg ) const;
- std::string getTargetingNS() const;
- const NamespaceString& getTargetingNSS() const;
- BSONObj getIndexKeyPattern() const;
-
- //
- // individual field accessors
- //
-
- bool isVerboseWC() const;
-
- void setNSS( const NamespaceString& nss );
- void setNS( StringData collName );
- const std::string& getNS() const;
- const NamespaceString& getNSS() const;
-
- std::size_t sizeWriteOps() const;
-
- void setWriteConcern( const BSONObj& writeConcern );
- void unsetWriteConcern();
- bool isWriteConcernSet() const;
- const BSONObj& getWriteConcern() const;
-
- void setOrdered( bool ordered );
- void unsetOrdered();
- bool isOrderedSet() const;
- bool getOrdered() const;
-
- void setMetadata(BatchedRequestMetadata* metadata);
- void unsetMetadata();
- bool isMetadataSet() const;
- BatchedRequestMetadata* getMetadata() const;
-
- void setShouldBypassValidation(bool newVal);
- bool shouldBypassValidation() const;
-
- //
- // Helpers for batch pre-processing
- //
-
- /**
- * Generates a new request, the same as the old, but with insert _ids if required.
- * Returns NULL if this is not an insert request or all inserts already have _ids.
- */
- static BatchedCommandRequest* cloneWithIds(const BatchedCommandRequest& origCmdRequest);
-
- /**
- * Whether or not this batch contains an upsert without an _id - these can't be sent
- * to multiple hosts.
- */
- static bool containsNoIDUpsert(const BatchedCommandRequest& request);
-
- //
- // Helpers for auth pre-parsing
- //
-
- /**
- * Helper to determine whether or not there are any upserts in the batch
- */
- static bool containsUpserts( const BSONObj& writeCmdObj );
-
- /**
- * Helper to extract the namespace being indexed from a raw BSON write command.
- *
- * Returns false with errMsg if the index write command seems invalid.
- * TODO: Remove when we have parsing hooked before authorization
- */
- static bool getIndexedNS( const BSONObj& writeCmdObj,
- std::string* nsToIndex,
- std::string* errMsg );
-
- private:
-
- BatchType _batchType;
- std::unique_ptr<BatchedInsertRequest> _insertReq;
- std::unique_ptr<BatchedUpdateRequest> _updateReq;
- std::unique_ptr<BatchedDeleteRequest> _deleteReq;
- };
+ /**
+ * insertReq ownership is transferred to here.
+ */
+ BatchedCommandRequest(BatchedInsertRequest* insertReq)
+ : _batchType(BatchType_Insert), _insertReq(insertReq) {}
/**
- * Similar to above, this class wraps the write items of a command request into a generically
- * usable type. Very thin wrapper, does not own the write item itself.
- *
- * TODO: Use in BatchedCommandRequest above
+ * updateReq ownership is transferred to here.
*/
- class BatchItemRef {
- public:
+ BatchedCommandRequest(BatchedUpdateRequest* updateReq)
+ : _batchType(BatchType_Update), _updateReq(updateReq) {}
- BatchItemRef( const BatchedCommandRequest* request, int itemIndex ) :
- _request( request ), _itemIndex( itemIndex ) {
- }
+ /**
+ * deleteReq ownership is transferred to here.
+ */
+ BatchedCommandRequest(BatchedDeleteRequest* deleteReq)
+ : _batchType(BatchType_Delete), _deleteReq(deleteReq) {}
+
+ virtual ~BatchedCommandRequest(){};
+
+ /** Copies all the fields present in 'this' to 'other'. */
+ void cloneTo(BatchedCommandRequest* other) const;
+
+ //
+ // bson serializable interface implementation
+ //
+
+ virtual bool isValid(std::string* errMsg) const;
+ virtual BSONObj toBSON() const;
+ virtual bool parseBSON(const BSONObj& source, std::string* errMsg);
+ virtual void clear();
+ virtual std::string toString() const;
+
+ //
+ // Batch type accessors
+ //
+
+ BatchType getBatchType() const;
+ BatchedInsertRequest* getInsertRequest() const;
+ BatchedUpdateRequest* getUpdateRequest() const;
+ BatchedDeleteRequest* getDeleteRequest() const;
+ // Index creation is also an insert, but a weird one.
+ bool isInsertIndexRequest() const;
+ bool isUniqueIndexRequest() const;
+ bool isValidIndexRequest(std::string* errMsg) const;
+ std::string getTargetingNS() const;
+ const NamespaceString& getTargetingNSS() const;
+ BSONObj getIndexKeyPattern() const;
+
+ //
+ // individual field accessors
+ //
+
+ bool isVerboseWC() const;
+
+ void setNSS(const NamespaceString& nss);
+ void setNS(StringData collName);
+ const std::string& getNS() const;
+ const NamespaceString& getNSS() const;
+
+ std::size_t sizeWriteOps() const;
+
+ void setWriteConcern(const BSONObj& writeConcern);
+ void unsetWriteConcern();
+ bool isWriteConcernSet() const;
+ const BSONObj& getWriteConcern() const;
+
+ void setOrdered(bool ordered);
+ void unsetOrdered();
+ bool isOrderedSet() const;
+ bool getOrdered() const;
+
+ void setMetadata(BatchedRequestMetadata* metadata);
+ void unsetMetadata();
+ bool isMetadataSet() const;
+ BatchedRequestMetadata* getMetadata() const;
+
+ void setShouldBypassValidation(bool newVal);
+ bool shouldBypassValidation() const;
+
+ //
+ // Helpers for batch pre-processing
+ //
- const BatchedCommandRequest* getRequest() const {
- return _request;
- }
+ /**
+ * Generates a new request, the same as the old, but with insert _ids if required.
+ * Returns NULL if this is not an insert request or all inserts already have _ids.
+ */
+ static BatchedCommandRequest* cloneWithIds(const BatchedCommandRequest& origCmdRequest);
- int getItemIndex() const {
- return _itemIndex;
- }
+ /**
+ * Whether or not this batch contains an upsert without an _id - these can't be sent
+ * to multiple hosts.
+ */
+ static bool containsNoIDUpsert(const BatchedCommandRequest& request);
- BatchedCommandRequest::BatchType getOpType() const {
- return _request->getBatchType();
- }
+ //
+ // Helpers for auth pre-parsing
+ //
- const BSONObj& getDocument() const {
- dassert( _itemIndex < static_cast<int>( _request->sizeWriteOps() ) );
- return _request->getInsertRequest()->getDocumentsAt( _itemIndex );
- }
+ /**
+ * Helper to determine whether or not there are any upserts in the batch
+ */
+ static bool containsUpserts(const BSONObj& writeCmdObj);
- const BatchedUpdateDocument* getUpdate() const {
- dassert( _itemIndex < static_cast<int>( _request->sizeWriteOps() ) );
- return _request->getUpdateRequest()->getUpdatesAt( _itemIndex );
- }
+ /**
+ * Helper to extract the namespace being indexed from a raw BSON write command.
+ *
+ * Returns false with errMsg if the index write command seems invalid.
+ * TODO: Remove when we have parsing hooked before authorization
+ */
+ static bool getIndexedNS(const BSONObj& writeCmdObj,
+ std::string* nsToIndex,
+ std::string* errMsg);
- const BatchedDeleteDocument* getDelete() const {
- dassert( _itemIndex < static_cast<int>( _request->sizeWriteOps() ) );
- return _request->getDeleteRequest()->getDeletesAt( _itemIndex );
- }
+private:
+ BatchType _batchType;
+ std::unique_ptr<BatchedInsertRequest> _insertReq;
+ std::unique_ptr<BatchedUpdateRequest> _updateReq;
+ std::unique_ptr<BatchedDeleteRequest> _deleteReq;
+};
- BSONObj toBSON() const {
- switch ( getOpType() ) {
+/**
+ * Similar to above, this class wraps the write items of a command request into a generically
+ * usable type. Very thin wrapper, does not own the write item itself.
+ *
+ * TODO: Use in BatchedCommandRequest above
+ */
+class BatchItemRef {
+public:
+ BatchItemRef(const BatchedCommandRequest* request, int itemIndex)
+ : _request(request), _itemIndex(itemIndex) {}
+
+ const BatchedCommandRequest* getRequest() const {
+ return _request;
+ }
+
+ int getItemIndex() const {
+ return _itemIndex;
+ }
+
+ BatchedCommandRequest::BatchType getOpType() const {
+ return _request->getBatchType();
+ }
+
+ const BSONObj& getDocument() const {
+ dassert(_itemIndex < static_cast<int>(_request->sizeWriteOps()));
+ return _request->getInsertRequest()->getDocumentsAt(_itemIndex);
+ }
+
+ const BatchedUpdateDocument* getUpdate() const {
+ dassert(_itemIndex < static_cast<int>(_request->sizeWriteOps()));
+ return _request->getUpdateRequest()->getUpdatesAt(_itemIndex);
+ }
+
+ const BatchedDeleteDocument* getDelete() const {
+ dassert(_itemIndex < static_cast<int>(_request->sizeWriteOps()));
+ return _request->getDeleteRequest()->getDeletesAt(_itemIndex);
+ }
+
+ BSONObj toBSON() const {
+ switch (getOpType()) {
case BatchedCommandRequest::BatchType_Insert:
return getDocument();
case BatchedCommandRequest::BatchType_Update:
return getUpdate()->toBSON();
default:
return getDelete()->toBSON();
- }
}
+ }
- private:
-
- const BatchedCommandRequest* _request;
- const int _itemIndex;
- };
+private:
+ const BatchedCommandRequest* _request;
+ const int _itemIndex;
+};
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/s/write_ops/batched_command_response.cpp b/src/mongo/s/write_ops/batched_command_response.cpp
index cce5fa660cc..4f137d1dfe6 100644
--- a/src/mongo/s/write_ops/batched_command_response.cpp
+++ b/src/mongo/s/write_ops/batched_command_response.cpp
@@ -33,524 +33,537 @@
namespace mongo {
- using std::unique_ptr;
- using std::string;
-
- using mongoutils::str::stream;
-
- const BSONField<int> BatchedCommandResponse::ok("ok");
- const BSONField<int> BatchedCommandResponse::errCode("code", ErrorCodes::UnknownError);
- const BSONField<string> BatchedCommandResponse::errMessage("errmsg");
- const BSONField<long long> BatchedCommandResponse::n("n", 0);
- const BSONField<long long> BatchedCommandResponse::nModified("nModified", 0);
- const BSONField<std::vector<BatchedUpsertDetail*> >
- BatchedCommandResponse::upsertDetails("upserted");
- const BSONField<Timestamp> BatchedCommandResponse::lastOp("lastOp");
- const BSONField<OID> BatchedCommandResponse::electionId("electionId");
- const BSONField<std::vector<WriteErrorDetail*> >
- BatchedCommandResponse::writeErrors("writeErrors");
- const BSONField<WCErrorDetail*> BatchedCommandResponse::writeConcernError("writeConcernError");
-
- BatchedCommandResponse::BatchedCommandResponse() {
- clear();
- }
+using std::unique_ptr;
+using std::string;
- BatchedCommandResponse::~BatchedCommandResponse() {
- unsetErrDetails();
- unsetUpsertDetails();
- }
+using mongoutils::str::stream;
- bool BatchedCommandResponse::isValid(std::string* errMsg) const {
- std::string dummy;
- if (errMsg == NULL) {
- errMsg = &dummy;
- }
+const BSONField<int> BatchedCommandResponse::ok("ok");
+const BSONField<int> BatchedCommandResponse::errCode("code", ErrorCodes::UnknownError);
+const BSONField<string> BatchedCommandResponse::errMessage("errmsg");
+const BSONField<long long> BatchedCommandResponse::n("n", 0);
+const BSONField<long long> BatchedCommandResponse::nModified("nModified", 0);
+const BSONField<std::vector<BatchedUpsertDetail*>> BatchedCommandResponse::upsertDetails(
+ "upserted");
+const BSONField<Timestamp> BatchedCommandResponse::lastOp("lastOp");
+const BSONField<OID> BatchedCommandResponse::electionId("electionId");
+const BSONField<std::vector<WriteErrorDetail*>> BatchedCommandResponse::writeErrors("writeErrors");
+const BSONField<WCErrorDetail*> BatchedCommandResponse::writeConcernError("writeConcernError");
- // All the mandatory fields must be present.
- if (!_isOkSet) {
- *errMsg = stream() << "missing " << ok.name() << " field";
- return false;
- }
+BatchedCommandResponse::BatchedCommandResponse() {
+ clear();
+}
+
+BatchedCommandResponse::~BatchedCommandResponse() {
+ unsetErrDetails();
+ unsetUpsertDetails();
+}
- return true;
+bool BatchedCommandResponse::isValid(std::string* errMsg) const {
+ std::string dummy;
+ if (errMsg == NULL) {
+ errMsg = &dummy;
}
- BSONObj BatchedCommandResponse::toBSON() const {
- BSONObjBuilder builder;
+ // All the mandatory fields must be present.
+ if (!_isOkSet) {
+ *errMsg = stream() << "missing " << ok.name() << " field";
+ return false;
+ }
- if (_isOkSet) builder.append(ok(), _ok);
+ return true;
+}
- if (_isErrCodeSet) builder.append(errCode(), _errCode);
+BSONObj BatchedCommandResponse::toBSON() const {
+ BSONObjBuilder builder;
- if (_isErrMessageSet) builder.append(errMessage(), _errMessage);
+ if (_isOkSet)
+ builder.append(ok(), _ok);
- if (_isNModifiedSet) builder.appendNumber(nModified(), _nModified);
- if (_isNSet) builder.appendNumber(n(), _n);
+ if (_isErrCodeSet)
+ builder.append(errCode(), _errCode);
- if (_upsertDetails.get()) {
- BSONArrayBuilder upsertedBuilder(builder.subarrayStart(upsertDetails()));
- for (std::vector<BatchedUpsertDetail*>::const_iterator it = _upsertDetails->begin();
- it != _upsertDetails->end();
- ++it) {
- BSONObj upsertedDetailsDocument = (*it)->toBSON();
- upsertedBuilder.append(upsertedDetailsDocument);
- }
- upsertedBuilder.done();
- }
+ if (_isErrMessageSet)
+ builder.append(errMessage(), _errMessage);
- if (_isLastOpSet) builder.append(lastOp(), _lastOp);
- if (_isElectionIdSet) builder.appendOID(electionId(), const_cast<OID*>(&_electionId));
-
- if (_writeErrorDetails.get()) {
- BSONArrayBuilder errDetailsBuilder(builder.subarrayStart(writeErrors()));
- for (std::vector<WriteErrorDetail*>::const_iterator it = _writeErrorDetails->begin();
- it != _writeErrorDetails->end();
- ++it) {
- BSONObj errDetailsDocument = (*it)->toBSON();
- errDetailsBuilder.append(errDetailsDocument);
- }
- errDetailsBuilder.done();
- }
+ if (_isNModifiedSet)
+ builder.appendNumber(nModified(), _nModified);
+ if (_isNSet)
+ builder.appendNumber(n(), _n);
- if (_wcErrDetails.get()) {
- builder.append(writeConcernError(), _wcErrDetails->toBSON());
+ if (_upsertDetails.get()) {
+ BSONArrayBuilder upsertedBuilder(builder.subarrayStart(upsertDetails()));
+ for (std::vector<BatchedUpsertDetail*>::const_iterator it = _upsertDetails->begin();
+ it != _upsertDetails->end();
+ ++it) {
+ BSONObj upsertedDetailsDocument = (*it)->toBSON();
+ upsertedBuilder.append(upsertedDetailsDocument);
}
-
- return builder.obj();
+ upsertedBuilder.done();
}
- bool BatchedCommandResponse::parseBSON(const BSONObj& source, string* errMsg) {
- clear();
-
- std::string dummy;
- if (!errMsg) errMsg = &dummy;
-
- FieldParser::FieldState fieldState;
- fieldState = FieldParser::extractNumber(source, ok, &_ok, errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) return false;
- _isOkSet = fieldState == FieldParser::FIELD_SET;
-
- fieldState = FieldParser::extract(source, errCode, &_errCode, errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) return false;
- _isErrCodeSet = fieldState == FieldParser::FIELD_SET;
-
- fieldState = FieldParser::extract(source, errMessage, &_errMessage, errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) return false;
- _isErrMessageSet = fieldState == FieldParser::FIELD_SET;
-
- // We're using appendNumber on generation so we'll try a smaller type
- // (int) first and then fall back to the original type (long long).
- BSONField<int> fieldN(n());
- int tempN;
- fieldState = FieldParser::extract(source, fieldN, &tempN, errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) {
- // try falling back to a larger type
- fieldState = FieldParser::extract(source, n, &_n, errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) return false;
- _isNSet = fieldState == FieldParser::FIELD_SET;
- }
- else if (fieldState == FieldParser::FIELD_SET) {
- _isNSet = true;
- _n = tempN;
- }
+ if (_isLastOpSet)
+ builder.append(lastOp(), _lastOp);
+ if (_isElectionIdSet)
+ builder.appendOID(electionId(), const_cast<OID*>(&_electionId));
- // We're using appendNumber on generation so we'll try a smaller type
- // (int) first and then fall back to the original type (long long).
- BSONField<int> fieldNModified(nModified());
- int intNModified;
- fieldState = FieldParser::extract(source, fieldNModified, &intNModified, errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) {
- // try falling back to a larger type
- fieldState = FieldParser::extract(source, nModified, &_nModified, errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) return false;
- _isNModifiedSet = fieldState == FieldParser::FIELD_SET;
- }
- else if (fieldState == FieldParser::FIELD_SET) {
- _isNModifiedSet = true;
- _nModified = intNModified;
+ if (_writeErrorDetails.get()) {
+ BSONArrayBuilder errDetailsBuilder(builder.subarrayStart(writeErrors()));
+ for (std::vector<WriteErrorDetail*>::const_iterator it = _writeErrorDetails->begin();
+ it != _writeErrorDetails->end();
+ ++it) {
+ BSONObj errDetailsDocument = (*it)->toBSON();
+ errDetailsBuilder.append(errDetailsDocument);
}
-
- std::vector<BatchedUpsertDetail*>* tempUpsertDetails = NULL;
- fieldState = FieldParser::extract( source, upsertDetails, &tempUpsertDetails, errMsg );
- if ( fieldState == FieldParser::FIELD_INVALID ) return false;
- _upsertDetails.reset(tempUpsertDetails);
-
- fieldState = FieldParser::extract(source, lastOp, &_lastOp, errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) return false;
- _isLastOpSet = fieldState == FieldParser::FIELD_SET;
-
- fieldState = FieldParser::extract(source, electionId, &_electionId, errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) return false;
- _isElectionIdSet = fieldState == FieldParser::FIELD_SET;
-
- std::vector<WriteErrorDetail*>* tempErrDetails = NULL;
- fieldState = FieldParser::extract(source, writeErrors, &tempErrDetails, errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) return false;
- _writeErrorDetails.reset(tempErrDetails);
-
- WCErrorDetail* wcError = NULL;
- fieldState = FieldParser::extract(source, writeConcernError, &wcError, errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) return false;
- _wcErrDetails.reset(wcError);
-
- return true;
+ errDetailsBuilder.done();
}
- void BatchedCommandResponse::clear() {
- _ok = false;
- _isOkSet = false;
-
- _errCode = 0;
- _isErrCodeSet = false;
-
- _errMessage.clear();
- _isErrMessageSet = false;
+ if (_wcErrDetails.get()) {
+ builder.append(writeConcernError(), _wcErrDetails->toBSON());
+ }
- _nModified = 0;
- _isNModifiedSet = false;
+ return builder.obj();
+}
- _n = 0;
- _isNSet = false;
+bool BatchedCommandResponse::parseBSON(const BSONObj& source, string* errMsg) {
+ clear();
- _singleUpserted = BSONObj();
- _isSingleUpsertedSet = false;
+ std::string dummy;
+ if (!errMsg)
+ errMsg = &dummy;
- if ( _upsertDetails.get() ) {
- for ( std::vector<BatchedUpsertDetail*>::const_iterator it = _upsertDetails->begin();
- it != _upsertDetails->end(); ++it ) {
- delete *it;
- };
- _upsertDetails.reset();
- }
+ FieldParser::FieldState fieldState;
+ fieldState = FieldParser::extractNumber(source, ok, &_ok, errMsg);
+ if (fieldState == FieldParser::FIELD_INVALID)
+ return false;
+ _isOkSet = fieldState == FieldParser::FIELD_SET;
- _lastOp = Timestamp();
- _isLastOpSet = false;
+ fieldState = FieldParser::extract(source, errCode, &_errCode, errMsg);
+ if (fieldState == FieldParser::FIELD_INVALID)
+ return false;
+ _isErrCodeSet = fieldState == FieldParser::FIELD_SET;
- _electionId = OID();
- _isElectionIdSet = false;
+ fieldState = FieldParser::extract(source, errMessage, &_errMessage, errMsg);
+ if (fieldState == FieldParser::FIELD_INVALID)
+ return false;
+ _isErrMessageSet = fieldState == FieldParser::FIELD_SET;
- if (_writeErrorDetails.get()) {
- for(std::vector<WriteErrorDetail*>::const_iterator it = _writeErrorDetails->begin();
- it != _writeErrorDetails->end();
- ++it) {
- delete *it;
- };
- _writeErrorDetails.reset();
- }
-
- _wcErrDetails.reset();
+ // We're using appendNumber on generation so we'll try a smaller type
+ // (int) first and then fall back to the original type (long long).
+ BSONField<int> fieldN(n());
+ int tempN;
+ fieldState = FieldParser::extract(source, fieldN, &tempN, errMsg);
+ if (fieldState == FieldParser::FIELD_INVALID) {
+ // try falling back to a larger type
+ fieldState = FieldParser::extract(source, n, &_n, errMsg);
+ if (fieldState == FieldParser::FIELD_INVALID)
+ return false;
+ _isNSet = fieldState == FieldParser::FIELD_SET;
+ } else if (fieldState == FieldParser::FIELD_SET) {
+ _isNSet = true;
+ _n = tempN;
+ }
+
+ // We're using appendNumber on generation so we'll try a smaller type
+ // (int) first and then fall back to the original type (long long).
+ BSONField<int> fieldNModified(nModified());
+ int intNModified;
+ fieldState = FieldParser::extract(source, fieldNModified, &intNModified, errMsg);
+ if (fieldState == FieldParser::FIELD_INVALID) {
+ // try falling back to a larger type
+ fieldState = FieldParser::extract(source, nModified, &_nModified, errMsg);
+ if (fieldState == FieldParser::FIELD_INVALID)
+ return false;
+ _isNModifiedSet = fieldState == FieldParser::FIELD_SET;
+ } else if (fieldState == FieldParser::FIELD_SET) {
+ _isNModifiedSet = true;
+ _nModified = intNModified;
}
- void BatchedCommandResponse::cloneTo(BatchedCommandResponse* other) const {
- other->clear();
+ std::vector<BatchedUpsertDetail*>* tempUpsertDetails = NULL;
+ fieldState = FieldParser::extract(source, upsertDetails, &tempUpsertDetails, errMsg);
+ if (fieldState == FieldParser::FIELD_INVALID)
+ return false;
+ _upsertDetails.reset(tempUpsertDetails);
- other->_ok = _ok;
- other->_isOkSet = _isOkSet;
+ fieldState = FieldParser::extract(source, lastOp, &_lastOp, errMsg);
+ if (fieldState == FieldParser::FIELD_INVALID)
+ return false;
+ _isLastOpSet = fieldState == FieldParser::FIELD_SET;
- other->_errCode = _errCode;
- other->_isErrCodeSet = _isErrCodeSet;
+ fieldState = FieldParser::extract(source, electionId, &_electionId, errMsg);
+ if (fieldState == FieldParser::FIELD_INVALID)
+ return false;
+ _isElectionIdSet = fieldState == FieldParser::FIELD_SET;
- other->_errMessage = _errMessage;
- other->_isErrMessageSet = _isErrMessageSet;
+ std::vector<WriteErrorDetail*>* tempErrDetails = NULL;
+ fieldState = FieldParser::extract(source, writeErrors, &tempErrDetails, errMsg);
+ if (fieldState == FieldParser::FIELD_INVALID)
+ return false;
+ _writeErrorDetails.reset(tempErrDetails);
- other->_nModified = _nModified;
- other->_isNModifiedSet = _isNModifiedSet;
+ WCErrorDetail* wcError = NULL;
+ fieldState = FieldParser::extract(source, writeConcernError, &wcError, errMsg);
+ if (fieldState == FieldParser::FIELD_INVALID)
+ return false;
+ _wcErrDetails.reset(wcError);
- other->_n = _n;
- other->_isNSet = _isNSet;
+ return true;
+}
- other->_singleUpserted = _singleUpserted;
- other->_isSingleUpsertedSet = _isSingleUpsertedSet;
+void BatchedCommandResponse::clear() {
+ _ok = false;
+ _isOkSet = false;
- other->unsetUpsertDetails();
- if (_upsertDetails.get()) {
- for (std::vector<BatchedUpsertDetail*>::const_iterator it = _upsertDetails->begin();
- it != _upsertDetails->end();
- ++it) {
- BatchedUpsertDetail* upsertDetailsItem = new BatchedUpsertDetail;
- (*it)->cloneTo(upsertDetailsItem);
- other->addToUpsertDetails(upsertDetailsItem);
- }
- }
+ _errCode = 0;
+ _isErrCodeSet = false;
- other->_lastOp = _lastOp;
- other->_isLastOpSet = _isLastOpSet;
-
- other->_electionId = _electionId;
- other->_isElectionIdSet = _isElectionIdSet;
-
- other->unsetErrDetails();
- if (_writeErrorDetails.get()) {
- for(std::vector<WriteErrorDetail*>::const_iterator it = _writeErrorDetails->begin();
- it != _writeErrorDetails->end();
- ++it) {
- WriteErrorDetail* errDetailsItem = new WriteErrorDetail;
- (*it)->cloneTo(errDetailsItem);
- other->addToErrDetails(errDetailsItem);
- }
- }
+ _errMessage.clear();
+ _isErrMessageSet = false;
- if (_wcErrDetails.get()) {
- other->_wcErrDetails.reset(new WCErrorDetail());
- _wcErrDetails->cloneTo(other->_wcErrDetails.get());
- }
- }
-
- std::string BatchedCommandResponse::toString() const {
- return toBSON().toString();
- }
+ _nModified = 0;
+ _isNModifiedSet = false;
- void BatchedCommandResponse::setOk(int ok) {
- _ok = ok;
- _isOkSet = true;
- }
+ _n = 0;
+ _isNSet = false;
- void BatchedCommandResponse::unsetOk() {
- _isOkSet = false;
- }
+ _singleUpserted = BSONObj();
+ _isSingleUpsertedSet = false;
- bool BatchedCommandResponse::isOkSet() const {
- return _isOkSet;
- }
-
- int BatchedCommandResponse::getOk() const {
- dassert(_isOkSet);
- return _ok;
+ if (_upsertDetails.get()) {
+ for (std::vector<BatchedUpsertDetail*>::const_iterator it = _upsertDetails->begin();
+ it != _upsertDetails->end();
+ ++it) {
+ delete *it;
+ };
+ _upsertDetails.reset();
}
- void BatchedCommandResponse::setErrCode(int errCode) {
- _errCode = errCode;
- _isErrCodeSet = true;
- }
+ _lastOp = Timestamp();
+ _isLastOpSet = false;
- void BatchedCommandResponse::unsetErrCode() {
- _isErrCodeSet = false;
- }
+ _electionId = OID();
+ _isElectionIdSet = false;
- bool BatchedCommandResponse::isErrCodeSet() const {
- return _isErrCodeSet;
+ if (_writeErrorDetails.get()) {
+ for (std::vector<WriteErrorDetail*>::const_iterator it = _writeErrorDetails->begin();
+ it != _writeErrorDetails->end();
+ ++it) {
+ delete *it;
+ };
+ _writeErrorDetails.reset();
}
- int BatchedCommandResponse::getErrCode() const {
- if ( _isErrCodeSet ) {
- return _errCode;
- }
- else {
- return errCode.getDefault();
- }
- }
+ _wcErrDetails.reset();
+}
- void BatchedCommandResponse::setErrMessage(StringData errMessage) {
- _errMessage = errMessage.toString();
- _isErrMessageSet = true;
- }
+void BatchedCommandResponse::cloneTo(BatchedCommandResponse* other) const {
+ other->clear();
- void BatchedCommandResponse::unsetErrMessage() {
- _isErrMessageSet = false;
- }
+ other->_ok = _ok;
+ other->_isOkSet = _isOkSet;
- bool BatchedCommandResponse::isErrMessageSet() const {
- return _isErrMessageSet;
- }
+ other->_errCode = _errCode;
+ other->_isErrCodeSet = _isErrCodeSet;
- const std::string& BatchedCommandResponse::getErrMessage() const {
- dassert(_isErrMessageSet);
- return _errMessage;
- }
+ other->_errMessage = _errMessage;
+ other->_isErrMessageSet = _isErrMessageSet;
- void BatchedCommandResponse::setNModified(long long n) {
- _nModified = n;
- _isNModifiedSet = true;
- }
+ other->_nModified = _nModified;
+ other->_isNModifiedSet = _isNModifiedSet;
- void BatchedCommandResponse::unsetNModified() {
- _isNModifiedSet = false;
- }
+ other->_n = _n;
+ other->_isNSet = _isNSet;
- bool BatchedCommandResponse::isNModified() const {
- return _isNModifiedSet;
- }
+ other->_singleUpserted = _singleUpserted;
+ other->_isSingleUpsertedSet = _isSingleUpsertedSet;
- long long BatchedCommandResponse::getNModified() const {
- if ( _isNModifiedSet ) {
- return _nModified;
- }
- else {
- return nModified.getDefault();
+ other->unsetUpsertDetails();
+ if (_upsertDetails.get()) {
+ for (std::vector<BatchedUpsertDetail*>::const_iterator it = _upsertDetails->begin();
+ it != _upsertDetails->end();
+ ++it) {
+ BatchedUpsertDetail* upsertDetailsItem = new BatchedUpsertDetail;
+ (*it)->cloneTo(upsertDetailsItem);
+ other->addToUpsertDetails(upsertDetailsItem);
}
}
- void BatchedCommandResponse::setN(long long n) {
- _n = n;
- _isNSet = true;
- }
-
- void BatchedCommandResponse::unsetN() {
- _isNSet = false;
- }
+ other->_lastOp = _lastOp;
+ other->_isLastOpSet = _isLastOpSet;
- bool BatchedCommandResponse::isNSet() const {
- return _isNSet;
- }
+ other->_electionId = _electionId;
+ other->_isElectionIdSet = _isElectionIdSet;
- long long BatchedCommandResponse::getN() const {
- if ( _isNSet ) {
- return _n;
- }
- else {
- return n.getDefault();
- }
- }
-
- void BatchedCommandResponse::setUpsertDetails(
- const std::vector<BatchedUpsertDetail*>& upsertDetails) {
- unsetUpsertDetails();
- for (std::vector<BatchedUpsertDetail*>::const_iterator it = upsertDetails.begin();
- it != upsertDetails.end();
+ other->unsetErrDetails();
+ if (_writeErrorDetails.get()) {
+ for (std::vector<WriteErrorDetail*>::const_iterator it = _writeErrorDetails->begin();
+ it != _writeErrorDetails->end();
++it) {
- unique_ptr<BatchedUpsertDetail> tempBatchedUpsertDetail(new BatchedUpsertDetail);
- (*it)->cloneTo(tempBatchedUpsertDetail.get());
- addToUpsertDetails(tempBatchedUpsertDetail.release());
+ WriteErrorDetail* errDetailsItem = new WriteErrorDetail;
+ (*it)->cloneTo(errDetailsItem);
+ other->addToErrDetails(errDetailsItem);
}
}
- void BatchedCommandResponse::addToUpsertDetails(BatchedUpsertDetail* upsertDetails) {
- if (_upsertDetails.get() == NULL) {
- _upsertDetails.reset(new std::vector<BatchedUpsertDetail*>);
- }
- _upsertDetails->push_back(upsertDetails);
- }
-
- void BatchedCommandResponse::unsetUpsertDetails() {
- if (_upsertDetails.get() != NULL) {
- for (std::vector<BatchedUpsertDetail*>::iterator it = _upsertDetails->begin();
- it != _upsertDetails->end();
- ++it) {
- delete *it;
- }
- _upsertDetails.reset();
- }
- }
-
- bool BatchedCommandResponse::isUpsertDetailsSet() const {
- return _upsertDetails.get() != NULL;
- }
-
- size_t BatchedCommandResponse::sizeUpsertDetails() const {
- dassert(_upsertDetails.get());
- return _upsertDetails->size();
- }
-
- const std::vector<BatchedUpsertDetail*>& BatchedCommandResponse::getUpsertDetails() const {
- dassert(_upsertDetails.get());
- return *_upsertDetails;
- }
-
- const BatchedUpsertDetail* BatchedCommandResponse::getUpsertDetailsAt(size_t pos) const {
- dassert(_upsertDetails.get());
- dassert(_upsertDetails->size() > pos);
- return _upsertDetails->at(pos);
- }
-
- void BatchedCommandResponse::setLastOp(Timestamp lastOp) {
- _lastOp = lastOp;
- _isLastOpSet = true;
+ if (_wcErrDetails.get()) {
+ other->_wcErrDetails.reset(new WCErrorDetail());
+ _wcErrDetails->cloneTo(other->_wcErrDetails.get());
}
+}
+
+std::string BatchedCommandResponse::toString() const {
+ return toBSON().toString();
+}
- void BatchedCommandResponse::unsetLastOp() {
- _isLastOpSet = false;
- }
+void BatchedCommandResponse::setOk(int ok) {
+ _ok = ok;
+ _isOkSet = true;
+}
+
+void BatchedCommandResponse::unsetOk() {
+ _isOkSet = false;
+}
+
+bool BatchedCommandResponse::isOkSet() const {
+ return _isOkSet;
+}
+
+int BatchedCommandResponse::getOk() const {
+ dassert(_isOkSet);
+ return _ok;
+}
- bool BatchedCommandResponse::isLastOpSet() const {
- return _isLastOpSet;
- }
-
- Timestamp BatchedCommandResponse::getLastOp() const {
- dassert(_isLastOpSet);
- return _lastOp;
- }
-
- void BatchedCommandResponse::setElectionId(const OID& electionId) {
- _electionId = electionId;
- _isElectionIdSet = true;
- }
-
- void BatchedCommandResponse::unsetElectionId() {
- _isElectionIdSet = false;
- }
-
- bool BatchedCommandResponse::isElectionIdSet() const {
- return _isElectionIdSet;
- }
-
- OID BatchedCommandResponse::getElectionId() const {
- dassert(_isElectionIdSet);
- return _electionId;
- }
-
- void BatchedCommandResponse::setErrDetails(const std::vector<WriteErrorDetail*>& errDetails) {
- unsetErrDetails();
- for (std::vector<WriteErrorDetail*>::const_iterator it = errDetails.begin();
- it != errDetails.end();
+void BatchedCommandResponse::setErrCode(int errCode) {
+ _errCode = errCode;
+ _isErrCodeSet = true;
+}
+
+void BatchedCommandResponse::unsetErrCode() {
+ _isErrCodeSet = false;
+}
+
+bool BatchedCommandResponse::isErrCodeSet() const {
+ return _isErrCodeSet;
+}
+
+int BatchedCommandResponse::getErrCode() const {
+ if (_isErrCodeSet) {
+ return _errCode;
+ } else {
+ return errCode.getDefault();
+ }
+}
+
+void BatchedCommandResponse::setErrMessage(StringData errMessage) {
+ _errMessage = errMessage.toString();
+ _isErrMessageSet = true;
+}
+
+void BatchedCommandResponse::unsetErrMessage() {
+ _isErrMessageSet = false;
+}
+
+bool BatchedCommandResponse::isErrMessageSet() const {
+ return _isErrMessageSet;
+}
+
+const std::string& BatchedCommandResponse::getErrMessage() const {
+ dassert(_isErrMessageSet);
+ return _errMessage;
+}
+
+void BatchedCommandResponse::setNModified(long long n) {
+ _nModified = n;
+ _isNModifiedSet = true;
+}
+
+void BatchedCommandResponse::unsetNModified() {
+ _isNModifiedSet = false;
+}
+
+bool BatchedCommandResponse::isNModified() const {
+ return _isNModifiedSet;
+}
+
+long long BatchedCommandResponse::getNModified() const {
+ if (_isNModifiedSet) {
+ return _nModified;
+ } else {
+ return nModified.getDefault();
+ }
+}
+
+void BatchedCommandResponse::setN(long long n) {
+ _n = n;
+ _isNSet = true;
+}
+
+void BatchedCommandResponse::unsetN() {
+ _isNSet = false;
+}
+
+bool BatchedCommandResponse::isNSet() const {
+ return _isNSet;
+}
+
+long long BatchedCommandResponse::getN() const {
+ if (_isNSet) {
+ return _n;
+ } else {
+ return n.getDefault();
+ }
+}
+
+void BatchedCommandResponse::setUpsertDetails(
+ const std::vector<BatchedUpsertDetail*>& upsertDetails) {
+ unsetUpsertDetails();
+ for (std::vector<BatchedUpsertDetail*>::const_iterator it = upsertDetails.begin();
+ it != upsertDetails.end();
+ ++it) {
+ unique_ptr<BatchedUpsertDetail> tempBatchedUpsertDetail(new BatchedUpsertDetail);
+ (*it)->cloneTo(tempBatchedUpsertDetail.get());
+ addToUpsertDetails(tempBatchedUpsertDetail.release());
+ }
+}
+
+void BatchedCommandResponse::addToUpsertDetails(BatchedUpsertDetail* upsertDetails) {
+ if (_upsertDetails.get() == NULL) {
+ _upsertDetails.reset(new std::vector<BatchedUpsertDetail*>);
+ }
+ _upsertDetails->push_back(upsertDetails);
+}
+
+void BatchedCommandResponse::unsetUpsertDetails() {
+ if (_upsertDetails.get() != NULL) {
+ for (std::vector<BatchedUpsertDetail*>::iterator it = _upsertDetails->begin();
+ it != _upsertDetails->end();
++it) {
- unique_ptr<WriteErrorDetail> tempBatchErrorDetail(new WriteErrorDetail);
- (*it)->cloneTo(tempBatchErrorDetail.get());
- addToErrDetails(tempBatchErrorDetail.release());
+ delete *it;
}
- }
-
- void BatchedCommandResponse::addToErrDetails(WriteErrorDetail* errDetails) {
- if (_writeErrorDetails.get() == NULL) {
- _writeErrorDetails.reset(new std::vector<WriteErrorDetail*>);
- }
- _writeErrorDetails->push_back(errDetails);
- }
-
- void BatchedCommandResponse::unsetErrDetails() {
- if (_writeErrorDetails.get() != NULL) {
- for(std::vector<WriteErrorDetail*>::iterator it = _writeErrorDetails->begin();
- it != _writeErrorDetails->end();
- ++it) {
- delete *it;
- }
- _writeErrorDetails.reset();
+ _upsertDetails.reset();
+ }
+}
+
+bool BatchedCommandResponse::isUpsertDetailsSet() const {
+ return _upsertDetails.get() != NULL;
+}
+
+size_t BatchedCommandResponse::sizeUpsertDetails() const {
+ dassert(_upsertDetails.get());
+ return _upsertDetails->size();
+}
+
+const std::vector<BatchedUpsertDetail*>& BatchedCommandResponse::getUpsertDetails() const {
+ dassert(_upsertDetails.get());
+ return *_upsertDetails;
+}
+
+const BatchedUpsertDetail* BatchedCommandResponse::getUpsertDetailsAt(size_t pos) const {
+ dassert(_upsertDetails.get());
+ dassert(_upsertDetails->size() > pos);
+ return _upsertDetails->at(pos);
+}
+
+void BatchedCommandResponse::setLastOp(Timestamp lastOp) {
+ _lastOp = lastOp;
+ _isLastOpSet = true;
+}
+
+void BatchedCommandResponse::unsetLastOp() {
+ _isLastOpSet = false;
+}
+
+bool BatchedCommandResponse::isLastOpSet() const {
+ return _isLastOpSet;
+}
+
+Timestamp BatchedCommandResponse::getLastOp() const {
+ dassert(_isLastOpSet);
+ return _lastOp;
+}
+
+void BatchedCommandResponse::setElectionId(const OID& electionId) {
+ _electionId = electionId;
+ _isElectionIdSet = true;
+}
+
+void BatchedCommandResponse::unsetElectionId() {
+ _isElectionIdSet = false;
+}
+
+bool BatchedCommandResponse::isElectionIdSet() const {
+ return _isElectionIdSet;
+}
+
+OID BatchedCommandResponse::getElectionId() const {
+ dassert(_isElectionIdSet);
+ return _electionId;
+}
+
+void BatchedCommandResponse::setErrDetails(const std::vector<WriteErrorDetail*>& errDetails) {
+ unsetErrDetails();
+ for (std::vector<WriteErrorDetail*>::const_iterator it = errDetails.begin();
+ it != errDetails.end();
+ ++it) {
+ unique_ptr<WriteErrorDetail> tempBatchErrorDetail(new WriteErrorDetail);
+ (*it)->cloneTo(tempBatchErrorDetail.get());
+ addToErrDetails(tempBatchErrorDetail.release());
+ }
+}
+
+void BatchedCommandResponse::addToErrDetails(WriteErrorDetail* errDetails) {
+ if (_writeErrorDetails.get() == NULL) {
+ _writeErrorDetails.reset(new std::vector<WriteErrorDetail*>);
+ }
+ _writeErrorDetails->push_back(errDetails);
+}
+
+void BatchedCommandResponse::unsetErrDetails() {
+ if (_writeErrorDetails.get() != NULL) {
+ for (std::vector<WriteErrorDetail*>::iterator it = _writeErrorDetails->begin();
+ it != _writeErrorDetails->end();
+ ++it) {
+ delete *it;
}
+ _writeErrorDetails.reset();
}
+}
- bool BatchedCommandResponse::isErrDetailsSet() const {
- return _writeErrorDetails.get() != NULL;
- }
+bool BatchedCommandResponse::isErrDetailsSet() const {
+ return _writeErrorDetails.get() != NULL;
+}
- size_t BatchedCommandResponse::sizeErrDetails() const {
- dassert(_writeErrorDetails.get());
- return _writeErrorDetails->size();
- }
+size_t BatchedCommandResponse::sizeErrDetails() const {
+ dassert(_writeErrorDetails.get());
+ return _writeErrorDetails->size();
+}
- const std::vector<WriteErrorDetail*>& BatchedCommandResponse::getErrDetails() const {
- dassert(_writeErrorDetails.get());
- return *_writeErrorDetails;
- }
+const std::vector<WriteErrorDetail*>& BatchedCommandResponse::getErrDetails() const {
+ dassert(_writeErrorDetails.get());
+ return *_writeErrorDetails;
+}
- const WriteErrorDetail* BatchedCommandResponse::getErrDetailsAt(size_t pos) const {
- dassert(_writeErrorDetails.get());
- dassert(_writeErrorDetails->size() > pos);
- return _writeErrorDetails->at(pos);
- }
+const WriteErrorDetail* BatchedCommandResponse::getErrDetailsAt(size_t pos) const {
+ dassert(_writeErrorDetails.get());
+ dassert(_writeErrorDetails->size() > pos);
+ return _writeErrorDetails->at(pos);
+}
- void BatchedCommandResponse::setWriteConcernError(WCErrorDetail* error) {
- _wcErrDetails.reset(error);
- }
+void BatchedCommandResponse::setWriteConcernError(WCErrorDetail* error) {
+ _wcErrDetails.reset(error);
+}
- void BatchedCommandResponse::unsetWriteConcernError() {
- _wcErrDetails.reset();
- }
+void BatchedCommandResponse::unsetWriteConcernError() {
+ _wcErrDetails.reset();
+}
- bool BatchedCommandResponse::isWriteConcernErrorSet() const {
- return _wcErrDetails.get();
- }
+bool BatchedCommandResponse::isWriteConcernErrorSet() const {
+ return _wcErrDetails.get();
+}
- const WCErrorDetail* BatchedCommandResponse::getWriteConcernError() const {
- return _wcErrDetails.get();
- }
+const WCErrorDetail* BatchedCommandResponse::getWriteConcernError() const {
+ return _wcErrDetails.get();
+}
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/s/write_ops/batched_command_response.h b/src/mongo/s/write_ops/batched_command_response.h
index 68cee2abaa6..370f836b921 100644
--- a/src/mongo/s/write_ops/batched_command_response.h
+++ b/src/mongo/s/write_ops/batched_command_response.h
@@ -40,161 +40,161 @@
namespace mongo {
- /**
- * This class represents the layout and content of a insert/update/delete runCommand,
- * the response side.
- */
- class BatchedCommandResponse : public BSONSerializable {
- MONGO_DISALLOW_COPYING(BatchedCommandResponse);
- public:
-
- //
- // schema declarations
- //
-
- static const BSONField<int> ok;
- static const BSONField<int> errCode;
- static const BSONField<std::string> errMessage;
- static const BSONField<long long> n;
- static const BSONField<long long> nModified;
- static const BSONField<std::vector<BatchedUpsertDetail*> > upsertDetails;
- static const BSONField<Timestamp> lastOp;
- static const BSONField<OID> electionId;
- static const BSONField<std::vector<WriteErrorDetail*> > writeErrors;
- static const BSONField<WCErrorDetail*> writeConcernError;
-
- //
- // construction / destruction
- //
-
- BatchedCommandResponse();
- virtual ~BatchedCommandResponse();
-
- /** Copies all the fields present in 'this' to 'other'. */
- void cloneTo(BatchedCommandResponse* other) const;
-
- //
- // bson serializable interface implementation
- //
-
- virtual bool isValid(std::string* errMsg) const;
- virtual BSONObj toBSON() const;
- virtual bool parseBSON(const BSONObj& source, std::string* errMsg);
- virtual void clear();
- virtual std::string toString() const;
-
- //
- // individual field accessors
- //
-
- void setOk(int ok);
- void unsetOk();
- bool isOkSet() const;
- int getOk() const;
-
- void setErrCode(int errCode);
- void unsetErrCode();
- bool isErrCodeSet() const;
- int getErrCode() const;
-
- void setErrMessage(StringData errMessage);
- void unsetErrMessage();
- bool isErrMessageSet() const;
- const std::string& getErrMessage() const;
-
- void setNModified(long long n);
- void unsetNModified();
- bool isNModified() const;
- long long getNModified() const;
-
- void setN(long long n);
- void unsetN();
- bool isNSet() const;
- long long getN() const;
-
- void setUpsertDetails(const std::vector<BatchedUpsertDetail*>& upsertDetails);
- void addToUpsertDetails(BatchedUpsertDetail* upsertDetails);
- void unsetUpsertDetails();
- bool isUpsertDetailsSet() const;
- std::size_t sizeUpsertDetails() const;
- const std::vector<BatchedUpsertDetail*>& getUpsertDetails() const;
- const BatchedUpsertDetail* getUpsertDetailsAt(std::size_t pos) const;
-
- void setLastOp(Timestamp lastOp);
- void unsetLastOp();
- bool isLastOpSet() const;
- Timestamp getLastOp() const;
-
- void setElectionId(const OID& electionId);
- void unsetElectionId();
- bool isElectionIdSet() const;
- OID getElectionId() const;
-
- void setErrDetails(const std::vector<WriteErrorDetail*>& errDetails);
- // errDetails ownership is transferred to here.
- void addToErrDetails(WriteErrorDetail* errDetails);
- void unsetErrDetails();
- bool isErrDetailsSet() const;
- std::size_t sizeErrDetails() const;
- const std::vector<WriteErrorDetail*>& getErrDetails() const;
- const WriteErrorDetail* getErrDetailsAt(std::size_t pos) const;
-
- void setWriteConcernError(WCErrorDetail* error);
- void unsetWriteConcernError();
- bool isWriteConcernErrorSet() const;
- const WCErrorDetail* getWriteConcernError() const;
-
- private:
- // Convention: (M)andatory, (O)ptional
-
- // (M) 0 if batch didn't get to be applied for any reason
- int _ok;
- bool _isOkSet;
-
- // (O) whether all items in the batch applied correctly
- int _errCode;
- bool _isErrCodeSet;
-
- // (O) whether all items in the batch applied correctly
- std::string _errMessage;
- bool _isErrMessageSet;
-
- // (M) number of documents affected
- long long _n;
- bool _isNSet;
-
- // (O) number of documents updated
- long long _nModified;
- bool _isNModifiedSet;
-
- // (O) "promoted" _upserted, if the corresponding request contained only one batch item
- // Should only be present if _upserted is not.
- BSONObj _singleUpserted;
- bool _isSingleUpsertedSet;
-
- // (O) Array of upserted items' _id's
- // Should only be present if _singleUpserted is not.
- std::unique_ptr<std::vector<BatchedUpsertDetail*> >_upsertDetails;
-
- // (O) Timestamp assigned to the write op when it was written to the oplog.
- // Normally, getLastError can use Client::_lastOp, but this is not valid for
- // mongos which loses track of the session due to RCAR. Therefore, we must
- // keep track of the lastOp manually ourselves.
- Timestamp _lastOp;
- bool _isLastOpSet;
-
- // (O) In addition to keeping track of the above lastOp timestamp, we must also keep
- // track of the primary we talked to. This is because if the primary moves,
- // subsequent calls to getLastError are invalid. The only way we know if an
- // election has occurred is to use the unique electionId.
- OID _electionId;
- bool _isElectionIdSet;
-
- // (O) Array of item-level error information
- std::unique_ptr<std::vector<WriteErrorDetail*> >_writeErrorDetails;
-
- // (O) errors that occurred while trying to satisfy the write concern.
- std::unique_ptr<WCErrorDetail> _wcErrDetails;
- };
-
-} // namespace mongo
+/**
+ * This class represents the layout and content of a insert/update/delete runCommand,
+ * the response side.
+ */
+class BatchedCommandResponse : public BSONSerializable {
+ MONGO_DISALLOW_COPYING(BatchedCommandResponse);
+
+public:
+ //
+ // schema declarations
+ //
+
+ static const BSONField<int> ok;
+ static const BSONField<int> errCode;
+ static const BSONField<std::string> errMessage;
+ static const BSONField<long long> n;
+ static const BSONField<long long> nModified;
+ static const BSONField<std::vector<BatchedUpsertDetail*>> upsertDetails;
+ static const BSONField<Timestamp> lastOp;
+ static const BSONField<OID> electionId;
+ static const BSONField<std::vector<WriteErrorDetail*>> writeErrors;
+ static const BSONField<WCErrorDetail*> writeConcernError;
+
+ //
+ // construction / destruction
+ //
+
+ BatchedCommandResponse();
+ virtual ~BatchedCommandResponse();
+
+ /** Copies all the fields present in 'this' to 'other'. */
+ void cloneTo(BatchedCommandResponse* other) const;
+
+ //
+ // bson serializable interface implementation
+ //
+
+ virtual bool isValid(std::string* errMsg) const;
+ virtual BSONObj toBSON() const;
+ virtual bool parseBSON(const BSONObj& source, std::string* errMsg);
+ virtual void clear();
+ virtual std::string toString() const;
+
+ //
+ // individual field accessors
+ //
+
+ void setOk(int ok);
+ void unsetOk();
+ bool isOkSet() const;
+ int getOk() const;
+
+ void setErrCode(int errCode);
+ void unsetErrCode();
+ bool isErrCodeSet() const;
+ int getErrCode() const;
+
+ void setErrMessage(StringData errMessage);
+ void unsetErrMessage();
+ bool isErrMessageSet() const;
+ const std::string& getErrMessage() const;
+
+ void setNModified(long long n);
+ void unsetNModified();
+ bool isNModified() const;
+ long long getNModified() const;
+
+ void setN(long long n);
+ void unsetN();
+ bool isNSet() const;
+ long long getN() const;
+
+ void setUpsertDetails(const std::vector<BatchedUpsertDetail*>& upsertDetails);
+ void addToUpsertDetails(BatchedUpsertDetail* upsertDetails);
+ void unsetUpsertDetails();
+ bool isUpsertDetailsSet() const;
+ std::size_t sizeUpsertDetails() const;
+ const std::vector<BatchedUpsertDetail*>& getUpsertDetails() const;
+ const BatchedUpsertDetail* getUpsertDetailsAt(std::size_t pos) const;
+
+ void setLastOp(Timestamp lastOp);
+ void unsetLastOp();
+ bool isLastOpSet() const;
+ Timestamp getLastOp() const;
+
+ void setElectionId(const OID& electionId);
+ void unsetElectionId();
+ bool isElectionIdSet() const;
+ OID getElectionId() const;
+
+ void setErrDetails(const std::vector<WriteErrorDetail*>& errDetails);
+ // errDetails ownership is transferred to here.
+ void addToErrDetails(WriteErrorDetail* errDetails);
+ void unsetErrDetails();
+ bool isErrDetailsSet() const;
+ std::size_t sizeErrDetails() const;
+ const std::vector<WriteErrorDetail*>& getErrDetails() const;
+ const WriteErrorDetail* getErrDetailsAt(std::size_t pos) const;
+
+ void setWriteConcernError(WCErrorDetail* error);
+ void unsetWriteConcernError();
+ bool isWriteConcernErrorSet() const;
+ const WCErrorDetail* getWriteConcernError() const;
+
+private:
+ // Convention: (M)andatory, (O)ptional
+
+ // (M) 0 if batch didn't get to be applied for any reason
+ int _ok;
+ bool _isOkSet;
+
+ // (O) whether all items in the batch applied correctly
+ int _errCode;
+ bool _isErrCodeSet;
+
+ // (O) whether all items in the batch applied correctly
+ std::string _errMessage;
+ bool _isErrMessageSet;
+
+ // (M) number of documents affected
+ long long _n;
+ bool _isNSet;
+
+ // (O) number of documents updated
+ long long _nModified;
+ bool _isNModifiedSet;
+
+ // (O) "promoted" _upserted, if the corresponding request contained only one batch item
+ // Should only be present if _upserted is not.
+ BSONObj _singleUpserted;
+ bool _isSingleUpsertedSet;
+
+ // (O) Array of upserted items' _id's
+ // Should only be present if _singleUpserted is not.
+ std::unique_ptr<std::vector<BatchedUpsertDetail*>> _upsertDetails;
+
+ // (O) Timestamp assigned to the write op when it was written to the oplog.
+ // Normally, getLastError can use Client::_lastOp, but this is not valid for
+ // mongos which loses track of the session due to RCAR. Therefore, we must
+ // keep track of the lastOp manually ourselves.
+ Timestamp _lastOp;
+ bool _isLastOpSet;
+
+ // (O) In addition to keeping track of the above lastOp timestamp, we must also keep
+ // track of the primary we talked to. This is because if the primary moves,
+ // subsequent calls to getLastError are invalid. The only way we know if an
+ // election has occurred is to use the unique electionId.
+ OID _electionId;
+ bool _isElectionIdSet;
+
+ // (O) Array of item-level error information
+ std::unique_ptr<std::vector<WriteErrorDetail*>> _writeErrorDetails;
+
+ // (O) errors that occurred while trying to satisfy the write concern.
+ std::unique_ptr<WCErrorDetail> _wcErrDetails;
+};
+
+} // namespace mongo
diff --git a/src/mongo/s/write_ops/batched_command_response_test.cpp b/src/mongo/s/write_ops/batched_command_response_test.cpp
index e97b567e761..ba53f201128 100644
--- a/src/mongo/s/write_ops/batched_command_response_test.cpp
+++ b/src/mongo/s/write_ops/batched_command_response_test.cpp
@@ -37,51 +37,42 @@
namespace {
- using mongo::BSONArray;
- using mongo::BSONObj;
- using mongo::BatchedCommandResponse;
- using mongo::WriteErrorDetail;
- using mongo::WCErrorDetail;
- using mongo::Date_t;
- using std::string;
+using mongo::BSONArray;
+using mongo::BSONObj;
+using mongo::BatchedCommandResponse;
+using mongo::WriteErrorDetail;
+using mongo::WCErrorDetail;
+using mongo::Date_t;
+using std::string;
- TEST(RoundTrip, Normal) {
+TEST(RoundTrip, Normal) {
+ BSONArray writeErrorsArray = BSON_ARRAY(
+ BSON(WriteErrorDetail::index(0) << WriteErrorDetail::errCode(-2)
+ << WriteErrorDetail::errInfo(BSON("more info" << 1))
+ << WriteErrorDetail::errMessage("index 0 failed"))
+ << BSON(WriteErrorDetail::index(1) << WriteErrorDetail::errCode(-3)
+ << WriteErrorDetail::errInfo(BSON("more info" << 1))
+ << WriteErrorDetail::errMessage("index 1 failed too")));
- BSONArray writeErrorsArray =
- BSON_ARRAY(
- BSON(WriteErrorDetail::index(0) <<
- WriteErrorDetail::errCode(-2) <<
- WriteErrorDetail::errInfo(BSON("more info" << 1)) <<
- WriteErrorDetail::errMessage("index 0 failed")
- ) <<
- BSON(WriteErrorDetail::index(1) <<
- WriteErrorDetail::errCode(-3) <<
- WriteErrorDetail::errInfo(BSON("more info" << 1)) <<
- WriteErrorDetail::errMessage("index 1 failed too")
- )
- );
+ BSONObj writeConcernError(BSON(WCErrorDetail::errCode(8)
+ << WCErrorDetail::errInfo(BSON("a" << 1))
+ << WCErrorDetail::errMessage("norepl")));
- BSONObj writeConcernError(
- BSON(WCErrorDetail::errCode(8) <<
- WCErrorDetail::errInfo(BSON("a" << 1)) <<
- WCErrorDetail::errMessage("norepl")));
+ BSONObj origResponseObj = BSON(
+ BatchedCommandResponse::ok(false)
+ << BatchedCommandResponse::errCode(-1)
+ << BatchedCommandResponse::errMessage("this batch didn't work")
+ << BatchedCommandResponse::n(0) << BatchedCommandResponse::lastOp(mongo::Timestamp(1ULL))
+ << BatchedCommandResponse::writeErrors() << writeErrorsArray
+ << BatchedCommandResponse::writeConcernError() << writeConcernError);
- BSONObj origResponseObj =
- BSON(BatchedCommandResponse::ok(false) <<
- BatchedCommandResponse::errCode(-1) <<
- BatchedCommandResponse::errMessage("this batch didn't work") <<
- BatchedCommandResponse::n(0) <<
- BatchedCommandResponse::lastOp(mongo::Timestamp(1ULL)) <<
- BatchedCommandResponse::writeErrors() << writeErrorsArray <<
- BatchedCommandResponse::writeConcernError() << writeConcernError);
+ string errMsg;
+ BatchedCommandResponse response;
+ bool ok = response.parseBSON(origResponseObj, &errMsg);
+ ASSERT_TRUE(ok);
- string errMsg;
- BatchedCommandResponse response;
- bool ok = response.parseBSON(origResponseObj, &errMsg);
- ASSERT_TRUE(ok);
+ BSONObj genResponseObj = response.toBSON();
+ ASSERT_EQUALS(0, genResponseObj.woCompare(origResponseObj));
+}
- BSONObj genResponseObj = response.toBSON();
- ASSERT_EQUALS(0, genResponseObj.woCompare(origResponseObj));
- }
-
-} // unnamed namespace
+} // unnamed namespace
diff --git a/src/mongo/s/write_ops/batched_delete_document.cpp b/src/mongo/s/write_ops/batched_delete_document.cpp
index e12e5ad7a86..2cca2deea4d 100644
--- a/src/mongo/s/write_ops/batched_delete_document.cpp
+++ b/src/mongo/s/write_ops/batched_delete_document.cpp
@@ -33,130 +33,133 @@
namespace mongo {
- using std::string;
+using std::string;
- using mongoutils::str::stream;
- const BSONField<BSONObj> BatchedDeleteDocument::query("q");
- const BSONField<int> BatchedDeleteDocument::limit("limit");
+using mongoutils::str::stream;
+const BSONField<BSONObj> BatchedDeleteDocument::query("q");
+const BSONField<int> BatchedDeleteDocument::limit("limit");
- BatchedDeleteDocument::BatchedDeleteDocument() {
- clear();
+BatchedDeleteDocument::BatchedDeleteDocument() {
+ clear();
+}
+
+BatchedDeleteDocument::~BatchedDeleteDocument() {}
+
+bool BatchedDeleteDocument::isValid(std::string* errMsg) const {
+ std::string dummy;
+ if (errMsg == NULL) {
+ errMsg = &dummy;
}
- BatchedDeleteDocument::~BatchedDeleteDocument() {
+ // All the mandatory fields must be present.
+ if (!_isQuerySet) {
+ *errMsg = stream() << "missing " << query.name() << " field";
+ return false;
}
- bool BatchedDeleteDocument::isValid(std::string* errMsg) const {
- std::string dummy;
- if (errMsg == NULL) {
- errMsg = &dummy;
- }
-
- // All the mandatory fields must be present.
- if (!_isQuerySet) {
- *errMsg = stream() << "missing " << query.name() << " field";
- return false;
- }
-
- if (!_isLimitSet) {
- *errMsg = stream() << "missing " << limit.name() << " field";
- return false;
- }
-
- if (_limit != 0 && _limit != 1) {
- *errMsg = stream() << "specify either a 0 to delete all"
- << "matching documents or 1 to delete a single document";
- return false;
- }
-
- return true;
+ if (!_isLimitSet) {
+ *errMsg = stream() << "missing " << limit.name() << " field";
+ return false;
}
- BSONObj BatchedDeleteDocument::toBSON() const {
- BSONObjBuilder builder;
+ if (_limit != 0 && _limit != 1) {
+ *errMsg = stream() << "specify either a 0 to delete all"
+ << "matching documents or 1 to delete a single document";
+ return false;
+ }
- if (_isQuerySet) builder.append(query(), _query);
+ return true;
+}
- if (_isLimitSet) builder.append(limit(), _limit);
+BSONObj BatchedDeleteDocument::toBSON() const {
+ BSONObjBuilder builder;
- return builder.obj();
- }
+ if (_isQuerySet)
+ builder.append(query(), _query);
- bool BatchedDeleteDocument::parseBSON(const BSONObj& source, string* errMsg) {
- clear();
+ if (_isLimitSet)
+ builder.append(limit(), _limit);
- std::string dummy;
- if (!errMsg) errMsg = &dummy;
+ return builder.obj();
+}
- FieldParser::FieldState fieldState;
- fieldState = FieldParser::extract(source, query, &_query, errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) return false;
- _isQuerySet = fieldState == FieldParser::FIELD_SET;
+bool BatchedDeleteDocument::parseBSON(const BSONObj& source, string* errMsg) {
+ clear();
- fieldState = FieldParser::extractNumber(source, limit, &_limit, errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) return false;
- _isLimitSet = fieldState == FieldParser::FIELD_SET;
+ std::string dummy;
+ if (!errMsg)
+ errMsg = &dummy;
- return true;
- }
+ FieldParser::FieldState fieldState;
+ fieldState = FieldParser::extract(source, query, &_query, errMsg);
+ if (fieldState == FieldParser::FIELD_INVALID)
+ return false;
+ _isQuerySet = fieldState == FieldParser::FIELD_SET;
- void BatchedDeleteDocument::clear() {
- _query = BSONObj();
- _isQuerySet = false;
+ fieldState = FieldParser::extractNumber(source, limit, &_limit, errMsg);
+ if (fieldState == FieldParser::FIELD_INVALID)
+ return false;
+ _isLimitSet = fieldState == FieldParser::FIELD_SET;
- _limit = 0;
- _isLimitSet = false;
+ return true;
+}
- }
+void BatchedDeleteDocument::clear() {
+ _query = BSONObj();
+ _isQuerySet = false;
- void BatchedDeleteDocument::cloneTo(BatchedDeleteDocument* other) const {
- other->clear();
+ _limit = 0;
+ _isLimitSet = false;
+}
- other->_query = _query;
- other->_isQuerySet = _isQuerySet;
+void BatchedDeleteDocument::cloneTo(BatchedDeleteDocument* other) const {
+ other->clear();
- other->_limit = _limit;
- other->_isLimitSet = _isLimitSet;
- }
+ other->_query = _query;
+ other->_isQuerySet = _isQuerySet;
- std::string BatchedDeleteDocument::toString() const {
- return toBSON().toString();
- }
+ other->_limit = _limit;
+ other->_isLimitSet = _isLimitSet;
+}
- void BatchedDeleteDocument::setQuery(const BSONObj& query) {
- _query = query.getOwned();
- _isQuerySet = true;
- }
+std::string BatchedDeleteDocument::toString() const {
+ return toBSON().toString();
+}
- void BatchedDeleteDocument::unsetQuery() {
- _isQuerySet = false;
- }
+void BatchedDeleteDocument::setQuery(const BSONObj& query) {
+ _query = query.getOwned();
+ _isQuerySet = true;
+}
- bool BatchedDeleteDocument::isQuerySet() const {
- return _isQuerySet;
- }
+void BatchedDeleteDocument::unsetQuery() {
+ _isQuerySet = false;
+}
- const BSONObj& BatchedDeleteDocument::getQuery() const {
- dassert(_isQuerySet);
- return _query;
- }
+bool BatchedDeleteDocument::isQuerySet() const {
+ return _isQuerySet;
+}
- void BatchedDeleteDocument::setLimit(int limit) {
- _limit = limit;
- _isLimitSet = true;
- }
+const BSONObj& BatchedDeleteDocument::getQuery() const {
+ dassert(_isQuerySet);
+ return _query;
+}
- void BatchedDeleteDocument::unsetLimit() {
- _isLimitSet = false;
- }
+void BatchedDeleteDocument::setLimit(int limit) {
+ _limit = limit;
+ _isLimitSet = true;
+}
- bool BatchedDeleteDocument::isLimitSet() const {
- return _isLimitSet;
- }
+void BatchedDeleteDocument::unsetLimit() {
+ _isLimitSet = false;
+}
- int BatchedDeleteDocument::getLimit() const {
- dassert(_isLimitSet);
- return _limit;
- }
+bool BatchedDeleteDocument::isLimitSet() const {
+ return _isLimitSet;
+}
+
+int BatchedDeleteDocument::getLimit() const {
+ dassert(_isLimitSet);
+ return _limit;
+}
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/s/write_ops/batched_delete_document.h b/src/mongo/s/write_ops/batched_delete_document.h
index 346d2cdb77d..26ab8c44716 100644
--- a/src/mongo/s/write_ops/batched_delete_document.h
+++ b/src/mongo/s/write_ops/batched_delete_document.h
@@ -37,65 +37,65 @@
namespace mongo {
- /**
- * This class represents the layout and content of a delete document runCommand,
- * in the resquest side.
- */
- class BatchedDeleteDocument : public BSONSerializable {
- MONGO_DISALLOW_COPYING(BatchedDeleteDocument);
- public:
-
- //
- // schema declarations
- //
-
- static const BSONField<BSONObj> query;
- static const BSONField<int> limit;
-
- //
- // construction / destruction
- //
-
- BatchedDeleteDocument();
- virtual ~BatchedDeleteDocument();
-
- /** Copies all the fields present in 'this' to 'other'. */
- void cloneTo(BatchedDeleteDocument* other) const;
-
- //
- // bson serializable interface implementation
- //
-
- virtual bool isValid(std::string* errMsg) const;
- virtual BSONObj toBSON() const;
- virtual bool parseBSON(const BSONObj& source, std::string* errMsg);
- virtual void clear();
- virtual std::string toString() const;
-
- //
- // individual field accessors
- //
-
- void setQuery(const BSONObj& query);
- void unsetQuery();
- bool isQuerySet() const;
- const BSONObj& getQuery() const;
-
- void setLimit(int limit);
- void unsetLimit();
- bool isLimitSet() const;
- int getLimit() const;
-
- private:
- // Convention: (M)andatory, (O)ptional
-
- // (M) query whose result the delete will remove
- BSONObj _query;
- bool _isQuerySet;
-
- // (M) the maximum number of documents to be deleted
- int _limit;
- bool _isLimitSet;
- };
-
-} // namespace mongo
+/**
+ * This class represents the layout and content of a delete document runCommand,
+ * in the resquest side.
+ */
+class BatchedDeleteDocument : public BSONSerializable {
+ MONGO_DISALLOW_COPYING(BatchedDeleteDocument);
+
+public:
+ //
+ // schema declarations
+ //
+
+ static const BSONField<BSONObj> query;
+ static const BSONField<int> limit;
+
+ //
+ // construction / destruction
+ //
+
+ BatchedDeleteDocument();
+ virtual ~BatchedDeleteDocument();
+
+ /** Copies all the fields present in 'this' to 'other'. */
+ void cloneTo(BatchedDeleteDocument* other) const;
+
+ //
+ // bson serializable interface implementation
+ //
+
+ virtual bool isValid(std::string* errMsg) const;
+ virtual BSONObj toBSON() const;
+ virtual bool parseBSON(const BSONObj& source, std::string* errMsg);
+ virtual void clear();
+ virtual std::string toString() const;
+
+ //
+ // individual field accessors
+ //
+
+ void setQuery(const BSONObj& query);
+ void unsetQuery();
+ bool isQuerySet() const;
+ const BSONObj& getQuery() const;
+
+ void setLimit(int limit);
+ void unsetLimit();
+ bool isLimitSet() const;
+ int getLimit() const;
+
+private:
+ // Convention: (M)andatory, (O)ptional
+
+ // (M) query whose result the delete will remove
+ BSONObj _query;
+ bool _isQuerySet;
+
+ // (M) the maximum number of documents to be deleted
+ int _limit;
+ bool _isLimitSet;
+};
+
+} // namespace mongo
diff --git a/src/mongo/s/write_ops/batched_delete_request.cpp b/src/mongo/s/write_ops/batched_delete_request.cpp
index b0c6a3f7a7a..d99df0ffbfc 100644
--- a/src/mongo/s/write_ops/batched_delete_request.cpp
+++ b/src/mongo/s/write_ops/batched_delete_request.cpp
@@ -33,279 +33,287 @@
namespace mongo {
- using std::unique_ptr;
- using std::string;
-
- using mongoutils::str::stream;
-
- const std::string BatchedDeleteRequest::BATCHED_DELETE_REQUEST = "delete";
- const BSONField<std::string> BatchedDeleteRequest::collName( "delete" );
- const BSONField<std::vector<BatchedDeleteDocument*> > BatchedDeleteRequest::deletes( "deletes" );
- const BSONField<BSONObj> BatchedDeleteRequest::writeConcern( "writeConcern" );
- const BSONField<bool> BatchedDeleteRequest::ordered( "ordered", true );
- const BSONField<BSONObj> BatchedDeleteRequest::metadata("metadata");
-
- BatchedDeleteRequest::BatchedDeleteRequest() {
- clear();
+using std::unique_ptr;
+using std::string;
+
+using mongoutils::str::stream;
+
+const std::string BatchedDeleteRequest::BATCHED_DELETE_REQUEST = "delete";
+const BSONField<std::string> BatchedDeleteRequest::collName("delete");
+const BSONField<std::vector<BatchedDeleteDocument*>> BatchedDeleteRequest::deletes("deletes");
+const BSONField<BSONObj> BatchedDeleteRequest::writeConcern("writeConcern");
+const BSONField<bool> BatchedDeleteRequest::ordered("ordered", true);
+const BSONField<BSONObj> BatchedDeleteRequest::metadata("metadata");
+
+BatchedDeleteRequest::BatchedDeleteRequest() {
+ clear();
+}
+
+BatchedDeleteRequest::~BatchedDeleteRequest() {
+ unsetDeletes();
+}
+
+bool BatchedDeleteRequest::isValid(std::string* errMsg) const {
+ std::string dummy;
+ if (errMsg == NULL) {
+ errMsg = &dummy;
}
- BatchedDeleteRequest::~BatchedDeleteRequest() {
- unsetDeletes();
+ // All the mandatory fields must be present.
+ if (!_isCollNameSet) {
+ *errMsg = stream() << "missing " << collName.name() << " field";
+ return false;
}
- bool BatchedDeleteRequest::isValid(std::string* errMsg) const {
- std::string dummy;
- if (errMsg == NULL) {
- errMsg = &dummy;
- }
-
- // All the mandatory fields must be present.
- if (!_isCollNameSet) {
- *errMsg = stream() << "missing " << collName.name() << " field";
- return false;
- }
-
- if (!_isDeletesSet) {
- *errMsg = stream() << "missing " << deletes.name() << " field";
- return false;
- }
-
- return true;
+ if (!_isDeletesSet) {
+ *errMsg = stream() << "missing " << deletes.name() << " field";
+ return false;
}
- BSONObj BatchedDeleteRequest::toBSON() const {
- BSONObjBuilder builder;
-
- if (_isCollNameSet) builder.append(collName(), _collName);
+ return true;
+}
- if (_isDeletesSet) {
- BSONArrayBuilder deletesBuilder(builder.subarrayStart(deletes()));
- for (std::vector<BatchedDeleteDocument*>::const_iterator it = _deletes.begin();
- it != _deletes.end();
- ++it) {
- BSONObj deleteDocument = (*it)->toBSON();
- deletesBuilder.append(deleteDocument);
- }
- deletesBuilder.done();
- }
-
- if (_isWriteConcernSet) builder.append(writeConcern(), _writeConcern);
+BSONObj BatchedDeleteRequest::toBSON() const {
+ BSONObjBuilder builder;
- if (_isOrderedSet) builder.append(ordered(), _ordered);
+ if (_isCollNameSet)
+ builder.append(collName(), _collName);
- if (_metadata) builder.append(metadata(), _metadata->toBSON());
-
- return builder.obj();
- }
-
- bool BatchedDeleteRequest::parseBSON(const BSONObj& source, string* errMsg) {
- clear();
-
- std::string dummy;
- if (!errMsg) errMsg = &dummy;
-
- FieldParser::FieldState fieldState;
- std::string collNameTemp;
- fieldState = FieldParser::extract(source, collName, &collNameTemp, errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) return false;
- _collName = NamespaceString(collNameTemp);
- _isCollNameSet = fieldState == FieldParser::FIELD_SET;
-
- fieldState = FieldParser::extract(source, deletes, &_deletes, errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) return false;
- _isDeletesSet = fieldState == FieldParser::FIELD_SET;
-
- fieldState = FieldParser::extract(source, writeConcern, &_writeConcern, errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) return false;
- _isWriteConcernSet = fieldState == FieldParser::FIELD_SET;
-
- fieldState = FieldParser::extract(source, ordered, &_ordered, errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) return false;
- _isOrderedSet = fieldState == FieldParser::FIELD_SET;
-
- BSONObj metadataObj;
- fieldState = FieldParser::extract(source, metadata, &metadataObj, errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) return false;
-
- if (!metadataObj.isEmpty()) {
- _metadata.reset(new BatchedRequestMetadata());
- if (!_metadata->parseBSON(metadataObj, errMsg)) {
- return false;
- }
+ if (_isDeletesSet) {
+ BSONArrayBuilder deletesBuilder(builder.subarrayStart(deletes()));
+ for (std::vector<BatchedDeleteDocument*>::const_iterator it = _deletes.begin();
+ it != _deletes.end();
+ ++it) {
+ BSONObj deleteDocument = (*it)->toBSON();
+ deletesBuilder.append(deleteDocument);
}
-
- return true;
+ deletesBuilder.done();
}
- void BatchedDeleteRequest::clear() {
- _collName = NamespaceString();
- _isCollNameSet = false;
+ if (_isWriteConcernSet)
+ builder.append(writeConcern(), _writeConcern);
- unsetDeletes();
+ if (_isOrderedSet)
+ builder.append(ordered(), _ordered);
- _writeConcern = BSONObj();
- _isWriteConcernSet = false;
+ if (_metadata)
+ builder.append(metadata(), _metadata->toBSON());
- _ordered = false;
- _isOrderedSet = false;
+ return builder.obj();
+}
- _metadata.reset();
- }
+bool BatchedDeleteRequest::parseBSON(const BSONObj& source, string* errMsg) {
+ clear();
- void BatchedDeleteRequest::cloneTo(BatchedDeleteRequest* other) const {
- other->clear();
+ std::string dummy;
+ if (!errMsg)
+ errMsg = &dummy;
- other->_collName = _collName;
- other->_isCollNameSet = _isCollNameSet;
+ FieldParser::FieldState fieldState;
+ std::string collNameTemp;
+ fieldState = FieldParser::extract(source, collName, &collNameTemp, errMsg);
+ if (fieldState == FieldParser::FIELD_INVALID)
+ return false;
+ _collName = NamespaceString(collNameTemp);
+ _isCollNameSet = fieldState == FieldParser::FIELD_SET;
- for(std::vector<BatchedDeleteDocument*>::const_iterator it = _deletes.begin();
- it != _deletes.end();
- ++it) {
- unique_ptr<BatchedDeleteDocument> tempBatchDeleteDocument(new BatchedDeleteDocument);
- (*it)->cloneTo(tempBatchDeleteDocument.get());
- other->addToDeletes(tempBatchDeleteDocument.release());
- }
- other->_isDeletesSet = _isDeletesSet;
+ fieldState = FieldParser::extract(source, deletes, &_deletes, errMsg);
+ if (fieldState == FieldParser::FIELD_INVALID)
+ return false;
+ _isDeletesSet = fieldState == FieldParser::FIELD_SET;
- other->_writeConcern = _writeConcern;
- other->_isWriteConcernSet = _isWriteConcernSet;
+ fieldState = FieldParser::extract(source, writeConcern, &_writeConcern, errMsg);
+ if (fieldState == FieldParser::FIELD_INVALID)
+ return false;
+ _isWriteConcernSet = fieldState == FieldParser::FIELD_SET;
- other->_ordered = _ordered;
- other->_isOrderedSet = _isOrderedSet;
+ fieldState = FieldParser::extract(source, ordered, &_ordered, errMsg);
+ if (fieldState == FieldParser::FIELD_INVALID)
+ return false;
+ _isOrderedSet = fieldState == FieldParser::FIELD_SET;
- if (_metadata) {
- other->_metadata.reset(new BatchedRequestMetadata());
- _metadata->cloneTo(other->_metadata.get());
- }
- }
+ BSONObj metadataObj;
+ fieldState = FieldParser::extract(source, metadata, &metadataObj, errMsg);
+ if (fieldState == FieldParser::FIELD_INVALID)
+ return false;
- std::string BatchedDeleteRequest::toString() const {
- return toBSON().toString();
- }
-
- void BatchedDeleteRequest::setCollName(StringData collName) {
- _collName = NamespaceString(collName);
- _isCollNameSet = true;
- }
-
- const std::string& BatchedDeleteRequest::getCollName() const {
- dassert(_isCollNameSet);
- return _collName.ns();
+ if (!metadataObj.isEmpty()) {
+ _metadata.reset(new BatchedRequestMetadata());
+ if (!_metadata->parseBSON(metadataObj, errMsg)) {
+ return false;
+ }
}
- void BatchedDeleteRequest::setCollNameNS(const NamespaceString& collName) {
- _collName = collName;
- _isCollNameSet = true;
- }
+ return true;
+}
- const NamespaceString& BatchedDeleteRequest::getCollNameNS() const {
- dassert(_isCollNameSet);
- return _collName;
- }
+void BatchedDeleteRequest::clear() {
+ _collName = NamespaceString();
+ _isCollNameSet = false;
- const NamespaceString& BatchedDeleteRequest::getTargetingNSS() const {
- return getCollNameNS();
- }
+ unsetDeletes();
- void BatchedDeleteRequest::setDeletes(const std::vector<BatchedDeleteDocument*>& deletes) {
- for (std::vector<BatchedDeleteDocument*>::const_iterator it = deletes.begin();
- it != deletes.end();
- ++it) {
- unique_ptr<BatchedDeleteDocument> tempBatchDeleteDocument(new BatchedDeleteDocument);
- (*it)->cloneTo(tempBatchDeleteDocument.get());
- addToDeletes(tempBatchDeleteDocument.release());
- }
- _isDeletesSet = deletes.size() > 0;
- }
-
- void BatchedDeleteRequest::addToDeletes(BatchedDeleteDocument* deletes) {
- _deletes.push_back(deletes);
- _isDeletesSet = true;
- }
-
- void BatchedDeleteRequest::unsetDeletes() {
- for(std::vector<BatchedDeleteDocument*>::iterator it = _deletes.begin();
- it != _deletes.end();
- ++it) {
- delete *it;
- }
- _deletes.clear();
- _isDeletesSet = false;
- }
+ _writeConcern = BSONObj();
+ _isWriteConcernSet = false;
- bool BatchedDeleteRequest::isDeletesSet() const {
- return _isDeletesSet;
- }
+ _ordered = false;
+ _isOrderedSet = false;
- size_t BatchedDeleteRequest::sizeDeletes() const {
- return _deletes.size();
- }
+ _metadata.reset();
+}
- const std::vector<BatchedDeleteDocument*>& BatchedDeleteRequest::getDeletes() const {
- dassert(_isDeletesSet);
- return _deletes;
- }
+void BatchedDeleteRequest::cloneTo(BatchedDeleteRequest* other) const {
+ other->clear();
- const BatchedDeleteDocument* BatchedDeleteRequest::getDeletesAt(size_t pos) const {
- dassert(_isDeletesSet);
- dassert(_deletes.size() > pos);
- return _deletes.at(pos);
- }
+ other->_collName = _collName;
+ other->_isCollNameSet = _isCollNameSet;
- void BatchedDeleteRequest::setWriteConcern(const BSONObj& writeConcern) {
- _writeConcern = writeConcern.getOwned();
- _isWriteConcernSet = true;
+ for (std::vector<BatchedDeleteDocument*>::const_iterator it = _deletes.begin();
+ it != _deletes.end();
+ ++it) {
+ unique_ptr<BatchedDeleteDocument> tempBatchDeleteDocument(new BatchedDeleteDocument);
+ (*it)->cloneTo(tempBatchDeleteDocument.get());
+ other->addToDeletes(tempBatchDeleteDocument.release());
}
+ other->_isDeletesSet = _isDeletesSet;
- void BatchedDeleteRequest::unsetWriteConcern() {
- _isWriteConcernSet = false;
- }
+ other->_writeConcern = _writeConcern;
+ other->_isWriteConcernSet = _isWriteConcernSet;
- bool BatchedDeleteRequest::isWriteConcernSet() const {
- return _isWriteConcernSet;
- }
+ other->_ordered = _ordered;
+ other->_isOrderedSet = _isOrderedSet;
- const BSONObj& BatchedDeleteRequest::getWriteConcern() const {
- dassert(_isWriteConcernSet);
- return _writeConcern;
+ if (_metadata) {
+ other->_metadata.reset(new BatchedRequestMetadata());
+ _metadata->cloneTo(other->_metadata.get());
}
-
- void BatchedDeleteRequest::setOrdered(bool ordered) {
- _ordered = ordered;
- _isOrderedSet = true;
+}
+
+std::string BatchedDeleteRequest::toString() const {
+ return toBSON().toString();
+}
+
+void BatchedDeleteRequest::setCollName(StringData collName) {
+ _collName = NamespaceString(collName);
+ _isCollNameSet = true;
+}
+
+const std::string& BatchedDeleteRequest::getCollName() const {
+ dassert(_isCollNameSet);
+ return _collName.ns();
+}
+
+void BatchedDeleteRequest::setCollNameNS(const NamespaceString& collName) {
+ _collName = collName;
+ _isCollNameSet = true;
+}
+
+const NamespaceString& BatchedDeleteRequest::getCollNameNS() const {
+ dassert(_isCollNameSet);
+ return _collName;
+}
+
+const NamespaceString& BatchedDeleteRequest::getTargetingNSS() const {
+ return getCollNameNS();
+}
+
+void BatchedDeleteRequest::setDeletes(const std::vector<BatchedDeleteDocument*>& deletes) {
+ for (std::vector<BatchedDeleteDocument*>::const_iterator it = deletes.begin();
+ it != deletes.end();
+ ++it) {
+ unique_ptr<BatchedDeleteDocument> tempBatchDeleteDocument(new BatchedDeleteDocument);
+ (*it)->cloneTo(tempBatchDeleteDocument.get());
+ addToDeletes(tempBatchDeleteDocument.release());
}
-
- void BatchedDeleteRequest::unsetOrdered() {
- _isOrderedSet = false;
- }
-
- bool BatchedDeleteRequest::isOrderedSet() const {
- return _isOrderedSet;
+ _isDeletesSet = deletes.size() > 0;
+}
+
+void BatchedDeleteRequest::addToDeletes(BatchedDeleteDocument* deletes) {
+ _deletes.push_back(deletes);
+ _isDeletesSet = true;
+}
+
+void BatchedDeleteRequest::unsetDeletes() {
+ for (std::vector<BatchedDeleteDocument*>::iterator it = _deletes.begin(); it != _deletes.end();
+ ++it) {
+ delete *it;
}
-
- bool BatchedDeleteRequest::getOrdered() const {
- if (_isOrderedSet) {
- return _ordered;
- }
- else {
- return ordered.getDefault();
- }
+ _deletes.clear();
+ _isDeletesSet = false;
+}
+
+bool BatchedDeleteRequest::isDeletesSet() const {
+ return _isDeletesSet;
+}
+
+size_t BatchedDeleteRequest::sizeDeletes() const {
+ return _deletes.size();
+}
+
+const std::vector<BatchedDeleteDocument*>& BatchedDeleteRequest::getDeletes() const {
+ dassert(_isDeletesSet);
+ return _deletes;
+}
+
+const BatchedDeleteDocument* BatchedDeleteRequest::getDeletesAt(size_t pos) const {
+ dassert(_isDeletesSet);
+ dassert(_deletes.size() > pos);
+ return _deletes.at(pos);
+}
+
+void BatchedDeleteRequest::setWriteConcern(const BSONObj& writeConcern) {
+ _writeConcern = writeConcern.getOwned();
+ _isWriteConcernSet = true;
+}
+
+void BatchedDeleteRequest::unsetWriteConcern() {
+ _isWriteConcernSet = false;
+}
+
+bool BatchedDeleteRequest::isWriteConcernSet() const {
+ return _isWriteConcernSet;
+}
+
+const BSONObj& BatchedDeleteRequest::getWriteConcern() const {
+ dassert(_isWriteConcernSet);
+ return _writeConcern;
+}
+
+void BatchedDeleteRequest::setOrdered(bool ordered) {
+ _ordered = ordered;
+ _isOrderedSet = true;
+}
+
+void BatchedDeleteRequest::unsetOrdered() {
+ _isOrderedSet = false;
+}
+
+bool BatchedDeleteRequest::isOrderedSet() const {
+ return _isOrderedSet;
+}
+
+bool BatchedDeleteRequest::getOrdered() const {
+ if (_isOrderedSet) {
+ return _ordered;
+ } else {
+ return ordered.getDefault();
}
+}
- void BatchedDeleteRequest::setMetadata(BatchedRequestMetadata* metadata) {
- _metadata.reset(metadata);
- }
+void BatchedDeleteRequest::setMetadata(BatchedRequestMetadata* metadata) {
+ _metadata.reset(metadata);
+}
- void BatchedDeleteRequest::unsetMetadata() {
- _metadata.reset();
- }
+void BatchedDeleteRequest::unsetMetadata() {
+ _metadata.reset();
+}
- bool BatchedDeleteRequest::isMetadataSet() const {
- return _metadata.get();
- }
+bool BatchedDeleteRequest::isMetadataSet() const {
+ return _metadata.get();
+}
- BatchedRequestMetadata* BatchedDeleteRequest::getMetadata() const {
- return _metadata.get();
- }
+BatchedRequestMetadata* BatchedDeleteRequest::getMetadata() const {
+ return _metadata.get();
+}
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/s/write_ops/batched_delete_request.h b/src/mongo/s/write_ops/batched_delete_request.h
index e3846833bec..ff73155360d 100644
--- a/src/mongo/s/write_ops/batched_delete_request.h
+++ b/src/mongo/s/write_ops/batched_delete_request.h
@@ -40,117 +40,119 @@
namespace mongo {
+/**
+ * This class represents the layout and content of a batched delete runCommand,
+ * the request side.
+ */
+class BatchedDeleteRequest : public BSONSerializable {
+ MONGO_DISALLOW_COPYING(BatchedDeleteRequest);
+
+public:
+ //
+ // schema declarations
+ //
+
+ // Name used for the batched delete invocation.
+ static const std::string BATCHED_DELETE_REQUEST;
+
+ // Field names and types in the batched delete command type.
+ static const BSONField<std::string> collName;
+ static const BSONField<std::vector<BatchedDeleteDocument*>> deletes;
+ static const BSONField<BSONObj> writeConcern;
+ static const BSONField<bool> ordered;
+ static const BSONField<BSONObj> metadata;
+
+ //
+ // construction / destruction
+ //
+
+ BatchedDeleteRequest();
+ virtual ~BatchedDeleteRequest();
+
+ /** Copies all the fields present in 'this' to 'other'. */
+ void cloneTo(BatchedDeleteRequest* other) const;
+
+ //
+ // bson serializable interface implementation
+ //
+
+ virtual bool isValid(std::string* errMsg) const;
+ virtual BSONObj toBSON() const;
+ virtual bool parseBSON(const BSONObj& source, std::string* errMsg);
+ virtual void clear();
+ virtual std::string toString() const;
+
+ //
+ // individual field accessors
+ //
+
+ void setCollName(StringData collName);
+ void setCollNameNS(const NamespaceString& collName);
+ const std::string& getCollName() const;
+ const NamespaceString& getCollNameNS() const;
+
+ const NamespaceString& getTargetingNSS() const;
+
+ void setDeletes(const std::vector<BatchedDeleteDocument*>& deletes);
+
/**
- * This class represents the layout and content of a batched delete runCommand,
- * the request side.
+ * deletes ownership is transferred to here.
*/
- class BatchedDeleteRequest : public BSONSerializable {
- MONGO_DISALLOW_COPYING(BatchedDeleteRequest);
- public:
-
- //
- // schema declarations
- //
-
- // Name used for the batched delete invocation.
- static const std::string BATCHED_DELETE_REQUEST;
-
- // Field names and types in the batched delete command type.
- static const BSONField<std::string> collName;
- static const BSONField<std::vector<BatchedDeleteDocument*> > deletes;
- static const BSONField<BSONObj> writeConcern;
- static const BSONField<bool> ordered;
- static const BSONField<BSONObj> metadata;
-
- //
- // construction / destruction
- //
-
- BatchedDeleteRequest();
- virtual ~BatchedDeleteRequest();
-
- /** Copies all the fields present in 'this' to 'other'. */
- void cloneTo(BatchedDeleteRequest* other) const;
-
- //
- // bson serializable interface implementation
- //
-
- virtual bool isValid(std::string* errMsg) const;
- virtual BSONObj toBSON() const;
- virtual bool parseBSON(const BSONObj& source, std::string* errMsg);
- virtual void clear();
- virtual std::string toString() const;
-
- //
- // individual field accessors
- //
-
- void setCollName(StringData collName);
- void setCollNameNS(const NamespaceString& collName);
- const std::string& getCollName() const;
- const NamespaceString& getCollNameNS() const;
-
- const NamespaceString& getTargetingNSS() const;
-
- void setDeletes(const std::vector<BatchedDeleteDocument*>& deletes);
-
- /**
- * deletes ownership is transferred to here.
- */
- void addToDeletes(BatchedDeleteDocument* deletes);
- void unsetDeletes();
- bool isDeletesSet() const;
- std::size_t sizeDeletes() const;
- const std::vector<BatchedDeleteDocument*>& getDeletes() const;
- const BatchedDeleteDocument* getDeletesAt(std::size_t pos) const;
-
- void setWriteConcern(const BSONObj& writeConcern);
- void unsetWriteConcern();
- bool isWriteConcernSet() const;
- const BSONObj& getWriteConcern() const;
-
- void setOrdered(bool ordered);
- void unsetOrdered();
- bool isOrderedSet() const;
- bool getOrdered() const;
-
- /*
- * metadata ownership will be transferred to this.
- */
- void setMetadata(BatchedRequestMetadata* metadata);
- void unsetMetadata();
- bool isMetadataSet() const;
- BatchedRequestMetadata* getMetadata() const;
-
- /**
- * These are no-ops since delete never validates documents. They only exist to fulfill the
- * unified API.
- */
- void setShouldBypassValidation(bool newVal) {}
- bool shouldBypassValidation() const { return false; }
-
- private:
- // Convention: (M)andatory, (O)ptional
-
- // (M) collection we're deleting from
- NamespaceString _collName;
- bool _isCollNameSet;
-
- // (M) array of individual deletes
- std::vector<BatchedDeleteDocument*> _deletes;
- bool _isDeletesSet;
-
- // (O) to be issued after the batch applied
- BSONObj _writeConcern;
- bool _isWriteConcernSet;
-
- // (O) whether batch is issued in parallel or not
- bool _ordered;
- bool _isOrderedSet;
-
- // (O) metadata associated with this request for internal use.
- std::unique_ptr<BatchedRequestMetadata> _metadata;
- };
-
-} // namespace mongo
+ void addToDeletes(BatchedDeleteDocument* deletes);
+ void unsetDeletes();
+ bool isDeletesSet() const;
+ std::size_t sizeDeletes() const;
+ const std::vector<BatchedDeleteDocument*>& getDeletes() const;
+ const BatchedDeleteDocument* getDeletesAt(std::size_t pos) const;
+
+ void setWriteConcern(const BSONObj& writeConcern);
+ void unsetWriteConcern();
+ bool isWriteConcernSet() const;
+ const BSONObj& getWriteConcern() const;
+
+ void setOrdered(bool ordered);
+ void unsetOrdered();
+ bool isOrderedSet() const;
+ bool getOrdered() const;
+
+ /*
+ * metadata ownership will be transferred to this.
+ */
+ void setMetadata(BatchedRequestMetadata* metadata);
+ void unsetMetadata();
+ bool isMetadataSet() const;
+ BatchedRequestMetadata* getMetadata() const;
+
+ /**
+ * These are no-ops since delete never validates documents. They only exist to fulfill the
+ * unified API.
+ */
+ void setShouldBypassValidation(bool newVal) {}
+ bool shouldBypassValidation() const {
+ return false;
+ }
+
+private:
+ // Convention: (M)andatory, (O)ptional
+
+ // (M) collection we're deleting from
+ NamespaceString _collName;
+ bool _isCollNameSet;
+
+ // (M) array of individual deletes
+ std::vector<BatchedDeleteDocument*> _deletes;
+ bool _isDeletesSet;
+
+ // (O) to be issued after the batch applied
+ BSONObj _writeConcern;
+ bool _isWriteConcernSet;
+
+ // (O) whether batch is issued in parallel or not
+ bool _ordered;
+ bool _isOrderedSet;
+
+ // (O) metadata associated with this request for internal use.
+ std::unique_ptr<BatchedRequestMetadata> _metadata;
+};
+
+} // namespace mongo
diff --git a/src/mongo/s/write_ops/batched_delete_request_test.cpp b/src/mongo/s/write_ops/batched_delete_request_test.cpp
index 33aa331259a..3ecf8aedea4 100644
--- a/src/mongo/s/write_ops/batched_delete_request_test.cpp
+++ b/src/mongo/s/write_ops/batched_delete_request_test.cpp
@@ -36,53 +36,46 @@
namespace {
- using mongo::BSONArray;
- using mongo::BSONObj;
- using mongo::BatchedDeleteRequest;
- using mongo::BatchedDeleteDocument;
- using mongo::BatchedRequestMetadata;
- using mongo::BSONArrayBuilder;
- using mongo::OID;
- using mongo::Timestamp;
- using std::string;
+using mongo::BSONArray;
+using mongo::BSONObj;
+using mongo::BatchedDeleteRequest;
+using mongo::BatchedDeleteDocument;
+using mongo::BatchedRequestMetadata;
+using mongo::BSONArrayBuilder;
+using mongo::OID;
+using mongo::Timestamp;
+using std::string;
- TEST(RoundTrip, Normal) {
- BSONArray deleteArray =
- BSON_ARRAY(
- BSON(BatchedDeleteDocument::query(BSON("a" << 1)) <<
- BatchedDeleteDocument::limit(1)
- ) <<
- BSON(BatchedDeleteDocument::query(BSON("b" << 1)) <<
- BatchedDeleteDocument::limit(1)
- )
- );
+TEST(RoundTrip, Normal) {
+ BSONArray deleteArray = BSON_ARRAY(
+ BSON(BatchedDeleteDocument::query(BSON("a" << 1)) << BatchedDeleteDocument::limit(1))
+ << BSON(BatchedDeleteDocument::query(BSON("b" << 1)) << BatchedDeleteDocument::limit(1)));
- BSONObj writeConcernObj = BSON("w" << 1);
+ BSONObj writeConcernObj = BSON("w" << 1);
- // The BSON_ARRAY macro doesn't support Timestamps.
- BSONArrayBuilder arrBuilder;
- arrBuilder.append(Timestamp(1,1));
- arrBuilder.append(OID::gen());
- BSONArray shardVersionArray = arrBuilder.arr();
+ // The BSON_ARRAY macro doesn't support Timestamps.
+ BSONArrayBuilder arrBuilder;
+ arrBuilder.append(Timestamp(1, 1));
+ arrBuilder.append(OID::gen());
+ BSONArray shardVersionArray = arrBuilder.arr();
- BSONObj origDeleteRequestObj =
- BSON(BatchedDeleteRequest::collName("test") <<
- BatchedDeleteRequest::deletes() << deleteArray <<
- BatchedDeleteRequest::writeConcern(writeConcernObj) <<
- BatchedDeleteRequest::ordered(true) <<
- BatchedDeleteRequest::metadata() <<
- BSON(BatchedRequestMetadata::shardName("shard000") <<
- BatchedRequestMetadata::shardVersion() << shardVersionArray <<
- BatchedRequestMetadata::session(0)));
+ BSONObj origDeleteRequestObj =
+ BSON(BatchedDeleteRequest::collName("test")
+ << BatchedDeleteRequest::deletes() << deleteArray
+ << BatchedDeleteRequest::writeConcern(writeConcernObj)
+ << BatchedDeleteRequest::ordered(true) << BatchedDeleteRequest::metadata()
+ << BSON(BatchedRequestMetadata::shardName("shard000")
+ << BatchedRequestMetadata::shardVersion() << shardVersionArray
+ << BatchedRequestMetadata::session(0)));
- string errMsg;
- BatchedDeleteRequest request;
- bool ok = request.parseBSON(origDeleteRequestObj, &errMsg);
- ASSERT_TRUE(ok);
+ string errMsg;
+ BatchedDeleteRequest request;
+ bool ok = request.parseBSON(origDeleteRequestObj, &errMsg);
+ ASSERT_TRUE(ok);
- BSONObj genDeleteRequestObj = request.toBSON();
- ASSERT_EQUALS(0, genDeleteRequestObj.woCompare(origDeleteRequestObj));
- }
+ BSONObj genDeleteRequestObj = request.toBSON();
+ ASSERT_EQUALS(0, genDeleteRequestObj.woCompare(origDeleteRequestObj));
+}
-} // unnamed namespace
+} // unnamed namespace
diff --git a/src/mongo/s/write_ops/batched_insert_request.cpp b/src/mongo/s/write_ops/batched_insert_request.cpp
index 94f7daabe0a..f5b862098c7 100644
--- a/src/mongo/s/write_ops/batched_insert_request.cpp
+++ b/src/mongo/s/write_ops/batched_insert_request.cpp
@@ -34,295 +34,296 @@
namespace mongo {
- using std::string;
+using std::string;
- using mongoutils::str::stream;
+using mongoutils::str::stream;
- const std::string BatchedInsertRequest::BATCHED_INSERT_REQUEST = "insert";
- const BSONField<std::string> BatchedInsertRequest::collName("insert");
- const BSONField<std::vector<BSONObj> > BatchedInsertRequest::documents("documents");
- const BSONField<BSONObj> BatchedInsertRequest::writeConcern("writeConcern");
- const BSONField<bool> BatchedInsertRequest::ordered("ordered", true);
- const BSONField<BSONObj> BatchedInsertRequest::metadata("metadata");
+const std::string BatchedInsertRequest::BATCHED_INSERT_REQUEST = "insert";
+const BSONField<std::string> BatchedInsertRequest::collName("insert");
+const BSONField<std::vector<BSONObj>> BatchedInsertRequest::documents("documents");
+const BSONField<BSONObj> BatchedInsertRequest::writeConcern("writeConcern");
+const BSONField<bool> BatchedInsertRequest::ordered("ordered", true);
+const BSONField<BSONObj> BatchedInsertRequest::metadata("metadata");
- BatchedInsertRequest::BatchedInsertRequest() {
- clear();
- }
+BatchedInsertRequest::BatchedInsertRequest() {
+ clear();
+}
- BatchedInsertRequest::~BatchedInsertRequest() {
- }
+BatchedInsertRequest::~BatchedInsertRequest() {}
- bool BatchedInsertRequest::isValid(std::string* errMsg) const {
- std::string dummy;
- if (errMsg == NULL) {
- errMsg = &dummy;
- }
-
- // All the mandatory fields must be present.
- if (!_isCollNameSet) {
- *errMsg = stream() << "missing " << collName.name() << " field";
- return false;
- }
-
- if (!_isDocumentsSet) {
- *errMsg = stream() << "missing " << documents.name() << " field";
- return false;
- }
-
- return true;
+bool BatchedInsertRequest::isValid(std::string* errMsg) const {
+ std::string dummy;
+ if (errMsg == NULL) {
+ errMsg = &dummy;
}
- BSONObj BatchedInsertRequest::toBSON() const {
- BSONObjBuilder builder;
-
- if (_isCollNameSet) builder.append(collName(), _collName);
-
- if (_isDocumentsSet) {
- BSONArrayBuilder documentsBuilder(builder.subarrayStart(documents()));
- for (std::vector<BSONObj>::const_iterator it = _documents.begin();
- it != _documents.end();
- ++it) {
- documentsBuilder.append(*it);
- }
- documentsBuilder.done();
- }
-
- if (_isWriteConcernSet) builder.append(writeConcern(), _writeConcern);
-
- if (_isOrderedSet) builder.append(ordered(), _ordered);
-
- if (_metadata) builder.append(metadata(), _metadata->toBSON());
-
- if (_shouldBypassValidation) builder.append(bypassDocumentValidationCommandOption(), true);
-
- return builder.obj();
+ // All the mandatory fields must be present.
+ if (!_isCollNameSet) {
+ *errMsg = stream() << "missing " << collName.name() << " field";
+ return false;
}
- static void extractIndexNSS(const BSONObj& indexDesc, NamespaceString* indexNSS) {
- *indexNSS = NamespaceString(indexDesc["ns"].str());
+ if (!_isDocumentsSet) {
+ *errMsg = stream() << "missing " << documents.name() << " field";
+ return false;
}
- bool BatchedInsertRequest::parseBSON(const BSONObj& source, string* errMsg) {
- clear();
-
- std::string dummy;
- if (!errMsg) errMsg = &dummy;
+ return true;
+}
- BSONObjIterator sourceIt(source);
+BSONObj BatchedInsertRequest::toBSON() const {
+ BSONObjBuilder builder;
- while ( sourceIt.more() ) {
+ if (_isCollNameSet)
+ builder.append(collName(), _collName);
- BSONElement sourceEl = sourceIt.next();
-
- if ( collName() == sourceEl.fieldName() ) {
- std::string temp;
- FieldParser::FieldState fieldState =
- FieldParser::extract( sourceEl, collName, &temp, errMsg );
- if (fieldState == FieldParser::FIELD_INVALID) return false;
- _collName = NamespaceString(temp);
- _isCollNameSet = fieldState == FieldParser::FIELD_SET;
- }
- else if ( documents() == sourceEl.fieldName() ) {
- FieldParser::FieldState fieldState =
- FieldParser::extract( sourceEl, documents, &_documents, errMsg );
- if ( fieldState == FieldParser::FIELD_INVALID ) return false;
- _isDocumentsSet = fieldState == FieldParser::FIELD_SET;
- if (_documents.size() >= 1)
- extractIndexNSS(_documents.at(0), &_targetNSS);
- }
- else if ( writeConcern() == sourceEl.fieldName() ) {
- FieldParser::FieldState fieldState =
- FieldParser::extract(sourceEl, writeConcern, &_writeConcern, errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) return false;
- _isWriteConcernSet = fieldState == FieldParser::FIELD_SET;
- }
- else if ( ordered() == sourceEl.fieldName() ) {
- FieldParser::FieldState fieldState =
- FieldParser::extract(sourceEl, ordered, &_ordered, errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) return false;
- _isOrderedSet = fieldState == FieldParser::FIELD_SET;
- }
- else if ( metadata() == sourceEl.fieldName() ) {
- BSONObj metadataObj;
- FieldParser::FieldState fieldState =
- FieldParser::extract(sourceEl, metadata, &metadataObj, errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) return false;
-
- if (!metadataObj.isEmpty()) {
- _metadata.reset(new BatchedRequestMetadata());
- if (!_metadata->parseBSON(metadataObj, errMsg)) {
- return false;
- }
- }
- }
- else if (bypassDocumentValidationCommandOption() == sourceEl.fieldNameStringData()) {
- _shouldBypassValidation = sourceEl.trueValue();
- }
+ if (_isDocumentsSet) {
+ BSONArrayBuilder documentsBuilder(builder.subarrayStart(documents()));
+ for (std::vector<BSONObj>::const_iterator it = _documents.begin(); it != _documents.end();
+ ++it) {
+ documentsBuilder.append(*it);
}
-
- return true;
+ documentsBuilder.done();
}
- void BatchedInsertRequest::clear() {
- _collName = NamespaceString();
- _targetNSS = NamespaceString();
- _isCollNameSet = false;
-
- _documents.clear();
- _isDocumentsSet =false;
-
- _writeConcern = BSONObj();
- _isWriteConcernSet = false;
-
- _ordered = false;
- _isOrderedSet = false;
-
- _shouldBypassValidation = false;
-
- _metadata.reset();
- }
-
- void BatchedInsertRequest::cloneTo(BatchedInsertRequest* other) const {
- other->clear();
-
- other->_collName = _collName;
- other->_targetNSS = _targetNSS;
- other->_isCollNameSet = _isCollNameSet;
-
- for(std::vector<BSONObj>::const_iterator it = _documents.begin();
- it != _documents.end();
- ++it) {
- other->addToDocuments(*it);
- }
- other->_isDocumentsSet = _isDocumentsSet;
-
- other->_writeConcern = _writeConcern;
- other->_isWriteConcernSet = _isWriteConcernSet;
-
- other->_ordered = _ordered;
- other->_isOrderedSet = _isOrderedSet;
-
- if (_metadata) {
- other->_metadata.reset(new BatchedRequestMetadata());
- _metadata->cloneTo(other->_metadata.get());
+ if (_isWriteConcernSet)
+ builder.append(writeConcern(), _writeConcern);
+
+ if (_isOrderedSet)
+ builder.append(ordered(), _ordered);
+
+ if (_metadata)
+ builder.append(metadata(), _metadata->toBSON());
+
+ if (_shouldBypassValidation)
+ builder.append(bypassDocumentValidationCommandOption(), true);
+
+ return builder.obj();
+}
+
+static void extractIndexNSS(const BSONObj& indexDesc, NamespaceString* indexNSS) {
+ *indexNSS = NamespaceString(indexDesc["ns"].str());
+}
+
+bool BatchedInsertRequest::parseBSON(const BSONObj& source, string* errMsg) {
+ clear();
+
+ std::string dummy;
+ if (!errMsg)
+ errMsg = &dummy;
+
+ BSONObjIterator sourceIt(source);
+
+ while (sourceIt.more()) {
+ BSONElement sourceEl = sourceIt.next();
+
+ if (collName() == sourceEl.fieldName()) {
+ std::string temp;
+ FieldParser::FieldState fieldState =
+ FieldParser::extract(sourceEl, collName, &temp, errMsg);
+ if (fieldState == FieldParser::FIELD_INVALID)
+ return false;
+ _collName = NamespaceString(temp);
+ _isCollNameSet = fieldState == FieldParser::FIELD_SET;
+ } else if (documents() == sourceEl.fieldName()) {
+ FieldParser::FieldState fieldState =
+ FieldParser::extract(sourceEl, documents, &_documents, errMsg);
+ if (fieldState == FieldParser::FIELD_INVALID)
+ return false;
+ _isDocumentsSet = fieldState == FieldParser::FIELD_SET;
+ if (_documents.size() >= 1)
+ extractIndexNSS(_documents.at(0), &_targetNSS);
+ } else if (writeConcern() == sourceEl.fieldName()) {
+ FieldParser::FieldState fieldState =
+ FieldParser::extract(sourceEl, writeConcern, &_writeConcern, errMsg);
+ if (fieldState == FieldParser::FIELD_INVALID)
+ return false;
+ _isWriteConcernSet = fieldState == FieldParser::FIELD_SET;
+ } else if (ordered() == sourceEl.fieldName()) {
+ FieldParser::FieldState fieldState =
+ FieldParser::extract(sourceEl, ordered, &_ordered, errMsg);
+ if (fieldState == FieldParser::FIELD_INVALID)
+ return false;
+ _isOrderedSet = fieldState == FieldParser::FIELD_SET;
+ } else if (metadata() == sourceEl.fieldName()) {
+ BSONObj metadataObj;
+ FieldParser::FieldState fieldState =
+ FieldParser::extract(sourceEl, metadata, &metadataObj, errMsg);
+ if (fieldState == FieldParser::FIELD_INVALID)
+ return false;
+
+ if (!metadataObj.isEmpty()) {
+ _metadata.reset(new BatchedRequestMetadata());
+ if (!_metadata->parseBSON(metadataObj, errMsg)) {
+ return false;
+ }
+ }
+ } else if (bypassDocumentValidationCommandOption() == sourceEl.fieldNameStringData()) {
+ _shouldBypassValidation = sourceEl.trueValue();
}
}
- std::string BatchedInsertRequest::toString() const {
- return toBSON().toString();
- }
+ return true;
+}
- void BatchedInsertRequest::setCollName(StringData collName) {
- _collName = NamespaceString(collName);
- _isCollNameSet = true;
- }
+void BatchedInsertRequest::clear() {
+ _collName = NamespaceString();
+ _targetNSS = NamespaceString();
+ _isCollNameSet = false;
- const std::string& BatchedInsertRequest::getCollName() const {
- dassert(_isCollNameSet);
- return _collName.ns();
- }
+ _documents.clear();
+ _isDocumentsSet = false;
- void BatchedInsertRequest::setCollNameNS(const NamespaceString& collName) {
- _collName = collName;
- _isCollNameSet = true;
- }
+ _writeConcern = BSONObj();
+ _isWriteConcernSet = false;
- const NamespaceString& BatchedInsertRequest::getCollNameNS() const {
- dassert(_isCollNameSet);
- return _collName;
- }
+ _ordered = false;
+ _isOrderedSet = false;
- const NamespaceString& BatchedInsertRequest::getTargetingNSS() const {
- return _targetNSS;
- }
+ _shouldBypassValidation = false;
- void BatchedInsertRequest::addToDocuments(const BSONObj& documents) {
- _documents.push_back(documents);
- _isDocumentsSet = true;
+ _metadata.reset();
+}
- if (_documents.size() == 1)
- extractIndexNSS(_documents.at(0), &_targetNSS);
- }
+void BatchedInsertRequest::cloneTo(BatchedInsertRequest* other) const {
+ other->clear();
- bool BatchedInsertRequest::isDocumentsSet() const {
- return _isDocumentsSet;
- }
+ other->_collName = _collName;
+ other->_targetNSS = _targetNSS;
+ other->_isCollNameSet = _isCollNameSet;
- size_t BatchedInsertRequest::sizeDocuments() const {
- return _documents.size();
+ for (std::vector<BSONObj>::const_iterator it = _documents.begin(); it != _documents.end();
+ ++it) {
+ other->addToDocuments(*it);
}
+ other->_isDocumentsSet = _isDocumentsSet;
- const std::vector<BSONObj>& BatchedInsertRequest::getDocuments() const {
- dassert(_isDocumentsSet);
- return _documents;
- }
+ other->_writeConcern = _writeConcern;
+ other->_isWriteConcernSet = _isWriteConcernSet;
- const BSONObj& BatchedInsertRequest::getDocumentsAt(size_t pos) const {
- dassert(_isDocumentsSet);
- dassert(_documents.size() > pos);
- return _documents.at(pos);
- }
+ other->_ordered = _ordered;
+ other->_isOrderedSet = _isOrderedSet;
- void BatchedInsertRequest::setDocumentAt(size_t pos, const BSONObj& doc) {
- dassert(_isDocumentsSet);
- dassert(_documents.size() > pos);
- _documents[pos] = doc;
+ if (_metadata) {
+ other->_metadata.reset(new BatchedRequestMetadata());
+ _metadata->cloneTo(other->_metadata.get());
}
-
- void BatchedInsertRequest::setWriteConcern(const BSONObj& writeConcern) {
- _writeConcern = writeConcern.getOwned();
- _isWriteConcernSet = true;
+}
+
+std::string BatchedInsertRequest::toString() const {
+ return toBSON().toString();
+}
+
+void BatchedInsertRequest::setCollName(StringData collName) {
+ _collName = NamespaceString(collName);
+ _isCollNameSet = true;
+}
+
+const std::string& BatchedInsertRequest::getCollName() const {
+ dassert(_isCollNameSet);
+ return _collName.ns();
+}
+
+void BatchedInsertRequest::setCollNameNS(const NamespaceString& collName) {
+ _collName = collName;
+ _isCollNameSet = true;
+}
+
+const NamespaceString& BatchedInsertRequest::getCollNameNS() const {
+ dassert(_isCollNameSet);
+ return _collName;
+}
+
+const NamespaceString& BatchedInsertRequest::getTargetingNSS() const {
+ return _targetNSS;
+}
+
+void BatchedInsertRequest::addToDocuments(const BSONObj& documents) {
+ _documents.push_back(documents);
+ _isDocumentsSet = true;
+
+ if (_documents.size() == 1)
+ extractIndexNSS(_documents.at(0), &_targetNSS);
+}
+
+bool BatchedInsertRequest::isDocumentsSet() const {
+ return _isDocumentsSet;
+}
+
+size_t BatchedInsertRequest::sizeDocuments() const {
+ return _documents.size();
+}
+
+const std::vector<BSONObj>& BatchedInsertRequest::getDocuments() const {
+ dassert(_isDocumentsSet);
+ return _documents;
+}
+
+const BSONObj& BatchedInsertRequest::getDocumentsAt(size_t pos) const {
+ dassert(_isDocumentsSet);
+ dassert(_documents.size() > pos);
+ return _documents.at(pos);
+}
+
+void BatchedInsertRequest::setDocumentAt(size_t pos, const BSONObj& doc) {
+ dassert(_isDocumentsSet);
+ dassert(_documents.size() > pos);
+ _documents[pos] = doc;
+}
+
+void BatchedInsertRequest::setWriteConcern(const BSONObj& writeConcern) {
+ _writeConcern = writeConcern.getOwned();
+ _isWriteConcernSet = true;
+}
+
+void BatchedInsertRequest::unsetWriteConcern() {
+ _isWriteConcernSet = false;
+}
+
+bool BatchedInsertRequest::isWriteConcernSet() const {
+ return _isWriteConcernSet;
+}
+
+const BSONObj& BatchedInsertRequest::getWriteConcern() const {
+ dassert(_isWriteConcernSet);
+ return _writeConcern;
+}
+
+void BatchedInsertRequest::setOrdered(bool ordered) {
+ _ordered = ordered;
+ _isOrderedSet = true;
+}
+
+void BatchedInsertRequest::unsetOrdered() {
+ _isOrderedSet = false;
+}
+
+bool BatchedInsertRequest::isOrderedSet() const {
+ return _isOrderedSet;
+}
+
+bool BatchedInsertRequest::getOrdered() const {
+ if (_isOrderedSet) {
+ return _ordered;
+ } else {
+ return ordered.getDefault();
}
+}
- void BatchedInsertRequest::unsetWriteConcern() {
- _isWriteConcernSet = false;
- }
+void BatchedInsertRequest::setMetadata(BatchedRequestMetadata* metadata) {
+ _metadata.reset(metadata);
+}
- bool BatchedInsertRequest::isWriteConcernSet() const {
- return _isWriteConcernSet;
- }
+void BatchedInsertRequest::unsetMetadata() {
+ _metadata.reset();
+}
- const BSONObj& BatchedInsertRequest::getWriteConcern() const {
- dassert(_isWriteConcernSet);
- return _writeConcern;
- }
-
- void BatchedInsertRequest::setOrdered(bool ordered) {
- _ordered = ordered;
- _isOrderedSet = true;
- }
+bool BatchedInsertRequest::isMetadataSet() const {
+ return _metadata.get();
+}
- void BatchedInsertRequest::unsetOrdered() {
- _isOrderedSet = false;
- }
-
- bool BatchedInsertRequest::isOrderedSet() const {
- return _isOrderedSet;
- }
-
- bool BatchedInsertRequest::getOrdered() const {
- if (_isOrderedSet) {
- return _ordered;
- }
- else {
- return ordered.getDefault();
- }
- }
-
- void BatchedInsertRequest::setMetadata(BatchedRequestMetadata* metadata) {
- _metadata.reset(metadata);
- }
-
- void BatchedInsertRequest::unsetMetadata() {
- _metadata.reset();
- }
-
- bool BatchedInsertRequest::isMetadataSet() const {
- return _metadata.get();
- }
-
- BatchedRequestMetadata* BatchedInsertRequest::getMetadata() const {
- return _metadata.get();
- }
+BatchedRequestMetadata* BatchedInsertRequest::getMetadata() const {
+ return _metadata.get();
+}
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/s/write_ops/batched_insert_request.h b/src/mongo/s/write_ops/batched_insert_request.h
index 8b5238b888e..baf0fbbbe2b 100644
--- a/src/mongo/s/write_ops/batched_insert_request.h
+++ b/src/mongo/s/write_ops/batched_insert_request.h
@@ -39,114 +39,118 @@
namespace mongo {
- /**
- * This class represents the layout and content of a batched insert runCommand,
- * the request side.
+/**
+ * This class represents the layout and content of a batched insert runCommand,
+ * the request side.
+ */
+class BatchedInsertRequest : public BSONSerializable {
+ MONGO_DISALLOW_COPYING(BatchedInsertRequest);
+
+public:
+ //
+ // schema declarations
+ //
+
+ // Name used for the batched insert invocation.
+ static const std::string BATCHED_INSERT_REQUEST;
+
+ // Field names and types in the batched insert command type.
+ static const BSONField<std::string> collName;
+ static const BSONField<std::vector<BSONObj>> documents;
+ static const BSONField<BSONObj> writeConcern;
+ static const BSONField<bool> ordered;
+ static const BSONField<BSONObj> metadata;
+
+ //
+ // construction / destruction
+ //
+
+ BatchedInsertRequest();
+ virtual ~BatchedInsertRequest();
+
+ /** Copies all the fields present in 'this' to 'other'. */
+ void cloneTo(BatchedInsertRequest* other) const;
+
+ //
+ // bson serializable interface implementation
+ //
+
+ virtual bool isValid(std::string* errMsg) const;
+ virtual BSONObj toBSON() const;
+ virtual bool parseBSON(const BSONObj& source, std::string* errMsg);
+ virtual void clear();
+ virtual std::string toString() const;
+
+ //
+ // individual field accessors
+ //
+
+ void setCollName(StringData collName);
+ void setCollNameNS(const NamespaceString& collName);
+ const std::string& getCollName() const;
+ const NamespaceString& getCollNameNS() const;
+
+ const NamespaceString& getTargetingNSS() const;
+
+ void addToDocuments(const BSONObj& documents);
+ bool isDocumentsSet() const;
+ std::size_t sizeDocuments() const;
+ const std::vector<BSONObj>& getDocuments() const;
+ const BSONObj& getDocumentsAt(std::size_t pos) const;
+ void setDocumentAt(std::size_t pos, const BSONObj& doc);
+
+ void setWriteConcern(const BSONObj& writeConcern);
+ void unsetWriteConcern();
+ bool isWriteConcernSet() const;
+ const BSONObj& getWriteConcern() const;
+
+ void setOrdered(bool ordered);
+ void unsetOrdered();
+ bool isOrderedSet() const;
+ bool getOrdered() const;
+
+ void setShouldBypassValidation(bool newVal) {
+ _shouldBypassValidation = newVal;
+ }
+ bool shouldBypassValidation() const {
+ return _shouldBypassValidation;
+ }
+
+ /*
+ * metadata ownership will be transferred to this.
*/
- class BatchedInsertRequest : public BSONSerializable {
- MONGO_DISALLOW_COPYING(BatchedInsertRequest);
- public:
-
- //
- // schema declarations
- //
-
- // Name used for the batched insert invocation.
- static const std::string BATCHED_INSERT_REQUEST;
-
- // Field names and types in the batched insert command type.
- static const BSONField<std::string> collName;
- static const BSONField<std::vector<BSONObj> > documents;
- static const BSONField<BSONObj> writeConcern;
- static const BSONField<bool> ordered;
- static const BSONField<BSONObj> metadata;
-
- //
- // construction / destruction
- //
-
- BatchedInsertRequest();
- virtual ~BatchedInsertRequest();
-
- /** Copies all the fields present in 'this' to 'other'. */
- void cloneTo(BatchedInsertRequest* other) const;
-
- //
- // bson serializable interface implementation
- //
-
- virtual bool isValid(std::string* errMsg) const;
- virtual BSONObj toBSON() const;
- virtual bool parseBSON(const BSONObj& source, std::string* errMsg);
- virtual void clear();
- virtual std::string toString() const;
-
- //
- // individual field accessors
- //
-
- void setCollName(StringData collName);
- void setCollNameNS(const NamespaceString& collName);
- const std::string& getCollName() const;
- const NamespaceString& getCollNameNS() const;
-
- const NamespaceString& getTargetingNSS() const;
-
- void addToDocuments(const BSONObj& documents);
- bool isDocumentsSet() const;
- std::size_t sizeDocuments() const;
- const std::vector<BSONObj>& getDocuments() const;
- const BSONObj& getDocumentsAt(std::size_t pos) const;
- void setDocumentAt(std::size_t pos, const BSONObj& doc);
-
- void setWriteConcern(const BSONObj& writeConcern);
- void unsetWriteConcern();
- bool isWriteConcernSet() const;
- const BSONObj& getWriteConcern() const;
-
- void setOrdered(bool ordered);
- void unsetOrdered();
- bool isOrderedSet() const;
- bool getOrdered() const;
-
- void setShouldBypassValidation(bool newVal) { _shouldBypassValidation = newVal; }
- bool shouldBypassValidation() const { return _shouldBypassValidation; }
-
- /*
- * metadata ownership will be transferred to this.
- */
- void setMetadata(BatchedRequestMetadata* metadata);
- void unsetMetadata();
- bool isMetadataSet() const;
- BatchedRequestMetadata* getMetadata() const;
-
- private:
- // Convention: (M)andatory, (O)ptional
-
- // (M) collection we're inserting on
- NamespaceString _collName;
- bool _isCollNameSet;
-
- // (M) array of documents to be inserted
- std::vector<BSONObj> _documents;
- bool _isDocumentsSet;
-
- // (O) to be issued after the batch applied
- BSONObj _writeConcern;
- bool _isWriteConcernSet;
-
- // (O) whether batch is issued in parallel or not
- bool _ordered;
- bool _isOrderedSet;
-
- // (O) metadata associated with this request for internal use.
- std::unique_ptr<BatchedRequestMetadata> _metadata;
-
- // (O) cached copied of target ns
- NamespaceString _targetNSS;
-
- // (O) should document validation be bypassed (default false)
- bool _shouldBypassValidation;
- };
-
-} // namespace mongo
+ void setMetadata(BatchedRequestMetadata* metadata);
+ void unsetMetadata();
+ bool isMetadataSet() const;
+ BatchedRequestMetadata* getMetadata() const;
+
+private:
+ // Convention: (M)andatory, (O)ptional
+
+ // (M) collection we're inserting on
+ NamespaceString _collName;
+ bool _isCollNameSet;
+
+ // (M) array of documents to be inserted
+ std::vector<BSONObj> _documents;
+ bool _isDocumentsSet;
+
+ // (O) to be issued after the batch applied
+ BSONObj _writeConcern;
+ bool _isWriteConcernSet;
+
+ // (O) whether batch is issued in parallel or not
+ bool _ordered;
+ bool _isOrderedSet;
+
+ // (O) metadata associated with this request for internal use.
+ std::unique_ptr<BatchedRequestMetadata> _metadata;
+
+ // (O) cached copied of target ns
+ NamespaceString _targetNSS;
+
+ // (O) should document validation be bypassed (default false)
+ bool _shouldBypassValidation;
+};
+
+} // namespace mongo
diff --git a/src/mongo/s/write_ops/batched_insert_request_test.cpp b/src/mongo/s/write_ops/batched_insert_request_test.cpp
index 5be837ae17a..66f99c7ffc6 100644
--- a/src/mongo/s/write_ops/batched_insert_request_test.cpp
+++ b/src/mongo/s/write_ops/batched_insert_request_test.cpp
@@ -36,115 +36,111 @@
namespace {
- using namespace mongo;
- using std::unique_ptr;
- using std::string;
-
- TEST(RoundTrip, Normal) {
- BSONArray insertArray = BSON_ARRAY(BSON("a" << 1) << BSON("b" << 1));
-
- BSONObj writeConcernObj = BSON("w" << 1);
-
- // The BSON_ARRAY macro doesn't support Timestamps.
- BSONArrayBuilder arrBuilder;
- arrBuilder.append(Timestamp(1,1));
- arrBuilder.append(OID::gen());
- BSONArray shardVersionArray = arrBuilder.arr();
-
- BSONObj origInsertRequestObj =
- BSON(BatchedInsertRequest::collName("test") <<
- BatchedInsertRequest::documents() << insertArray <<
- BatchedInsertRequest::writeConcern(writeConcernObj) <<
- BatchedInsertRequest::ordered(true) <<
- BatchedInsertRequest::metadata() << BSON(
- BatchedRequestMetadata::shardName("shard0000") <<
- BatchedRequestMetadata::shardVersion() << shardVersionArray <<
- BatchedRequestMetadata::session(0)));
-
- string errMsg;
- BatchedInsertRequest request;
- bool ok = request.parseBSON(origInsertRequestObj, &errMsg);
- ASSERT_TRUE(ok);
-
- BSONObj genInsertRequestObj = request.toBSON();
- ASSERT_EQUALS(0, genInsertRequestObj.woCompare(origInsertRequestObj));
- }
-
- TEST(GenID, All) {
-
- BatchedCommandRequest cmdRequest(BatchedCommandRequest::BatchType_Insert);
- BatchedInsertRequest& request = *cmdRequest.getInsertRequest();
-
- request.setCollName("foo.bar");
- request.setOrdered(false);
-
- BSONObj insertA = BSON( "a" << 1 );
- BSONObj insertB = BSON( "b" << 1 );
- request.addToDocuments(insertA);
- request.addToDocuments(insertB);
-
- unique_ptr<BatchedCommandRequest> idCmdRequest;
- idCmdRequest.reset(BatchedCommandRequest::cloneWithIds(cmdRequest));
- ASSERT(idCmdRequest.get());
-
- BatchedInsertRequest* idRequest = idCmdRequest->getInsertRequest();
- ASSERT_EQUALS(idRequest->getCollName(), request.getCollName());
- ASSERT_EQUALS(idRequest->getOrdered(), request.getOrdered());
-
- ASSERT(!idRequest->getDocumentsAt(0)["_id"].eoo());
- ASSERT_EQUALS(idRequest->getDocumentsAt(0).nFields(), 2);
- ASSERT(!idRequest->getDocumentsAt(1)["_id"].eoo());
- ASSERT_EQUALS(idRequest->getDocumentsAt(1).nFields(), 2);
- }
-
- TEST(GenID, Partial) {
-
- BatchedCommandRequest cmdRequest(BatchedCommandRequest::BatchType_Insert);
- BatchedInsertRequest& request = *cmdRequest.getInsertRequest();
-
- request.setCollName("foo.bar");
- request.setOrdered(false);
-
- BSONObj insertA = BSON( "a" << 1 );
- BSONObj insertB = BSON( "b" << 1 << "_id" << 1 );
- BSONObj insertC = BSON( "c" << 1 );
- request.addToDocuments(insertA);
- request.addToDocuments(insertB);
- request.addToDocuments(insertC);
-
- unique_ptr<BatchedCommandRequest> idCmdRequest;
- idCmdRequest.reset(BatchedCommandRequest::cloneWithIds(cmdRequest));
- ASSERT(idCmdRequest.get());
-
- BatchedInsertRequest* idRequest = idCmdRequest->getInsertRequest();
- ASSERT_EQUALS(idRequest->getCollName(), request.getCollName());
- ASSERT_EQUALS(idRequest->getOrdered(), request.getOrdered());
-
- ASSERT(!idRequest->getDocumentsAt(0)["_id"].eoo());
- ASSERT_EQUALS(idRequest->getDocumentsAt(0).nFields(), 2);
- ASSERT(!idRequest->getDocumentsAt(1)["_id"].eoo());
- ASSERT_EQUALS(idRequest->getDocumentsAt(1).nFields(), 2);
- ASSERT(!idRequest->getDocumentsAt(2)["_id"].eoo());
- ASSERT_EQUALS(idRequest->getDocumentsAt(1).nFields(), 2);
- }
-
- TEST(GenID, None) {
-
- BatchedCommandRequest cmdRequest(BatchedCommandRequest::BatchType_Insert);
- BatchedInsertRequest& request = *cmdRequest.getInsertRequest();
-
- // We need to check for system.indexes namespace
- request.setCollName("foo.bar");
-
- BSONObj insertA = BSON( "_id" << 0 << "a" << 1 );
- BSONObj insertB = BSON( "b" << 1 << "_id" << 1 );
- request.addToDocuments(insertA);
- request.addToDocuments(insertB);
-
- unique_ptr<BatchedCommandRequest> idCmdRequest;
- idCmdRequest.reset(BatchedCommandRequest::cloneWithIds(cmdRequest));
- ASSERT(!idCmdRequest.get());
- }
-
-
-} // unnamed namespace
+using namespace mongo;
+using std::unique_ptr;
+using std::string;
+
+TEST(RoundTrip, Normal) {
+ BSONArray insertArray = BSON_ARRAY(BSON("a" << 1) << BSON("b" << 1));
+
+ BSONObj writeConcernObj = BSON("w" << 1);
+
+ // The BSON_ARRAY macro doesn't support Timestamps.
+ BSONArrayBuilder arrBuilder;
+ arrBuilder.append(Timestamp(1, 1));
+ arrBuilder.append(OID::gen());
+ BSONArray shardVersionArray = arrBuilder.arr();
+
+ BSONObj origInsertRequestObj =
+ BSON(BatchedInsertRequest::collName("test")
+ << BatchedInsertRequest::documents() << insertArray
+ << BatchedInsertRequest::writeConcern(writeConcernObj)
+ << BatchedInsertRequest::ordered(true) << BatchedInsertRequest::metadata()
+ << BSON(BatchedRequestMetadata::shardName("shard0000")
+ << BatchedRequestMetadata::shardVersion() << shardVersionArray
+ << BatchedRequestMetadata::session(0)));
+
+ string errMsg;
+ BatchedInsertRequest request;
+ bool ok = request.parseBSON(origInsertRequestObj, &errMsg);
+ ASSERT_TRUE(ok);
+
+ BSONObj genInsertRequestObj = request.toBSON();
+ ASSERT_EQUALS(0, genInsertRequestObj.woCompare(origInsertRequestObj));
+}
+
+TEST(GenID, All) {
+ BatchedCommandRequest cmdRequest(BatchedCommandRequest::BatchType_Insert);
+ BatchedInsertRequest& request = *cmdRequest.getInsertRequest();
+
+ request.setCollName("foo.bar");
+ request.setOrdered(false);
+
+ BSONObj insertA = BSON("a" << 1);
+ BSONObj insertB = BSON("b" << 1);
+ request.addToDocuments(insertA);
+ request.addToDocuments(insertB);
+
+ unique_ptr<BatchedCommandRequest> idCmdRequest;
+ idCmdRequest.reset(BatchedCommandRequest::cloneWithIds(cmdRequest));
+ ASSERT(idCmdRequest.get());
+
+ BatchedInsertRequest* idRequest = idCmdRequest->getInsertRequest();
+ ASSERT_EQUALS(idRequest->getCollName(), request.getCollName());
+ ASSERT_EQUALS(idRequest->getOrdered(), request.getOrdered());
+
+ ASSERT(!idRequest->getDocumentsAt(0)["_id"].eoo());
+ ASSERT_EQUALS(idRequest->getDocumentsAt(0).nFields(), 2);
+ ASSERT(!idRequest->getDocumentsAt(1)["_id"].eoo());
+ ASSERT_EQUALS(idRequest->getDocumentsAt(1).nFields(), 2);
+}
+
+TEST(GenID, Partial) {
+ BatchedCommandRequest cmdRequest(BatchedCommandRequest::BatchType_Insert);
+ BatchedInsertRequest& request = *cmdRequest.getInsertRequest();
+
+ request.setCollName("foo.bar");
+ request.setOrdered(false);
+
+ BSONObj insertA = BSON("a" << 1);
+ BSONObj insertB = BSON("b" << 1 << "_id" << 1);
+ BSONObj insertC = BSON("c" << 1);
+ request.addToDocuments(insertA);
+ request.addToDocuments(insertB);
+ request.addToDocuments(insertC);
+
+ unique_ptr<BatchedCommandRequest> idCmdRequest;
+ idCmdRequest.reset(BatchedCommandRequest::cloneWithIds(cmdRequest));
+ ASSERT(idCmdRequest.get());
+
+ BatchedInsertRequest* idRequest = idCmdRequest->getInsertRequest();
+ ASSERT_EQUALS(idRequest->getCollName(), request.getCollName());
+ ASSERT_EQUALS(idRequest->getOrdered(), request.getOrdered());
+
+ ASSERT(!idRequest->getDocumentsAt(0)["_id"].eoo());
+ ASSERT_EQUALS(idRequest->getDocumentsAt(0).nFields(), 2);
+ ASSERT(!idRequest->getDocumentsAt(1)["_id"].eoo());
+ ASSERT_EQUALS(idRequest->getDocumentsAt(1).nFields(), 2);
+ ASSERT(!idRequest->getDocumentsAt(2)["_id"].eoo());
+ ASSERT_EQUALS(idRequest->getDocumentsAt(1).nFields(), 2);
+}
+
+TEST(GenID, None) {
+ BatchedCommandRequest cmdRequest(BatchedCommandRequest::BatchType_Insert);
+ BatchedInsertRequest& request = *cmdRequest.getInsertRequest();
+
+ // We need to check for system.indexes namespace
+ request.setCollName("foo.bar");
+
+ BSONObj insertA = BSON("_id" << 0 << "a" << 1);
+ BSONObj insertB = BSON("b" << 1 << "_id" << 1);
+ request.addToDocuments(insertA);
+ request.addToDocuments(insertB);
+
+ unique_ptr<BatchedCommandRequest> idCmdRequest;
+ idCmdRequest.reset(BatchedCommandRequest::cloneWithIds(cmdRequest));
+ ASSERT(!idCmdRequest.get());
+}
+
+
+} // unnamed namespace
diff --git a/src/mongo/s/write_ops/batched_request_metadata.cpp b/src/mongo/s/write_ops/batched_request_metadata.cpp
index 29d4b5e97fc..a2e21f99e4c 100644
--- a/src/mongo/s/write_ops/batched_request_metadata.cpp
+++ b/src/mongo/s/write_ops/batched_request_metadata.cpp
@@ -32,146 +32,147 @@
namespace mongo {
- using std::unique_ptr;
- using std::string;
+using std::unique_ptr;
+using std::string;
- const BSONField<string> BatchedRequestMetadata::shardName("shardName");
- const BSONField<ChunkVersion> BatchedRequestMetadata::shardVersion("shardVersion");
- const BSONField<long long> BatchedRequestMetadata::session("session");
+const BSONField<string> BatchedRequestMetadata::shardName("shardName");
+const BSONField<ChunkVersion> BatchedRequestMetadata::shardVersion("shardVersion");
+const BSONField<long long> BatchedRequestMetadata::session("session");
- BatchedRequestMetadata::BatchedRequestMetadata():
- _isShardNameSet(false),
- _session(0),
- _isSessionSet(false) {
- }
-
- BatchedRequestMetadata::~BatchedRequestMetadata() {
-
- }
-
- bool BatchedRequestMetadata::isValid(string* errMsg) const {
- // all fields are mandatory.
- return true;
- }
+BatchedRequestMetadata::BatchedRequestMetadata()
+ : _isShardNameSet(false), _session(0), _isSessionSet(false) {}
- BSONObj BatchedRequestMetadata::toBSON() const {
- BSONObjBuilder metadataBuilder;
+BatchedRequestMetadata::~BatchedRequestMetadata() {}
- if (_isShardNameSet) metadataBuilder << shardName(_shardName);
+bool BatchedRequestMetadata::isValid(string* errMsg) const {
+ // all fields are mandatory.
+ return true;
+}
- if (_shardVersion.get()) {
- // ChunkVersion wants to be an array.
- metadataBuilder.append(shardVersion(),
- static_cast<BSONArray>(_shardVersion->toBSON()));
- }
+BSONObj BatchedRequestMetadata::toBSON() const {
+ BSONObjBuilder metadataBuilder;
- if (_isSessionSet) metadataBuilder << session(_session);
+ if (_isShardNameSet)
+ metadataBuilder << shardName(_shardName);
- return metadataBuilder.obj();
+ if (_shardVersion.get()) {
+ // ChunkVersion wants to be an array.
+ metadataBuilder.append(shardVersion(), static_cast<BSONArray>(_shardVersion->toBSON()));
}
- bool BatchedRequestMetadata::parseBSON(const BSONObj& source, string* errMsg) {
- clear();
+ if (_isSessionSet)
+ metadataBuilder << session(_session);
- string dummy;
- if (!errMsg) errMsg = &dummy;
+ return metadataBuilder.obj();
+}
- FieldParser::FieldState fieldState;
- fieldState = FieldParser::extract(source, shardName, &_shardName, errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) return false;
- _isShardNameSet = fieldState == FieldParser::FIELD_SET;
+bool BatchedRequestMetadata::parseBSON(const BSONObj& source, string* errMsg) {
+ clear();
- {
- std::unique_ptr<ChunkVersion> tempChunkVersion(new ChunkVersion);
- fieldState = FieldParser::extract(source, shardVersion,
- tempChunkVersion.get(), errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) return false;
- if (fieldState == FieldParser::FIELD_SET) _shardVersion.swap(tempChunkVersion);
- }
+ string dummy;
+ if (!errMsg)
+ errMsg = &dummy;
- fieldState = FieldParser::extract(source, session, &_session, errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) return false;
- _isSessionSet = fieldState == FieldParser::FIELD_SET;
+ FieldParser::FieldState fieldState;
+ fieldState = FieldParser::extract(source, shardName, &_shardName, errMsg);
+ if (fieldState == FieldParser::FIELD_INVALID)
+ return false;
+ _isShardNameSet = fieldState == FieldParser::FIELD_SET;
- return true;
+ {
+ std::unique_ptr<ChunkVersion> tempChunkVersion(new ChunkVersion);
+ fieldState = FieldParser::extract(source, shardVersion, tempChunkVersion.get(), errMsg);
+ if (fieldState == FieldParser::FIELD_INVALID)
+ return false;
+ if (fieldState == FieldParser::FIELD_SET)
+ _shardVersion.swap(tempChunkVersion);
}
- void BatchedRequestMetadata::clear() {
- _shardName.clear();
- _isShardNameSet = false;
+ fieldState = FieldParser::extract(source, session, &_session, errMsg);
+ if (fieldState == FieldParser::FIELD_INVALID)
+ return false;
+ _isSessionSet = fieldState == FieldParser::FIELD_SET;
- _shardVersion.reset();
+ return true;
+}
- _session = 0;
- _isSessionSet = false;
- }
+void BatchedRequestMetadata::clear() {
+ _shardName.clear();
+ _isShardNameSet = false;
- string BatchedRequestMetadata::toString() const {
- return toBSON().toString();
- }
+ _shardVersion.reset();
- void BatchedRequestMetadata::cloneTo(BatchedRequestMetadata* other) const {
- other->_shardName = _shardName;
- other->_isShardNameSet = _isShardNameSet;
+ _session = 0;
+ _isSessionSet = false;
+}
- if (other->_shardVersion.get()) _shardVersion->cloneTo(other->_shardVersion.get());
+string BatchedRequestMetadata::toString() const {
+ return toBSON().toString();
+}
- other->_session = _session;
- other->_isSessionSet = _isSessionSet;
- }
+void BatchedRequestMetadata::cloneTo(BatchedRequestMetadata* other) const {
+ other->_shardName = _shardName;
+ other->_isShardNameSet = _isShardNameSet;
- void BatchedRequestMetadata::setShardName( StringData shardName ) {
- _shardName = shardName.toString();
- _isShardNameSet = true;
- }
+ if (other->_shardVersion.get())
+ _shardVersion->cloneTo(other->_shardVersion.get());
- void BatchedRequestMetadata::unsetShardName() {
- _isShardNameSet = false;
- }
+ other->_session = _session;
+ other->_isSessionSet = _isSessionSet;
+}
- bool BatchedRequestMetadata::isShardNameSet() const {
- return _isShardNameSet;
- }
+void BatchedRequestMetadata::setShardName(StringData shardName) {
+ _shardName = shardName.toString();
+ _isShardNameSet = true;
+}
- const string& BatchedRequestMetadata::getShardName() const {
- dassert( _isShardNameSet );
- return _shardName;
- }
+void BatchedRequestMetadata::unsetShardName() {
+ _isShardNameSet = false;
+}
- void BatchedRequestMetadata::setShardVersion(const ChunkVersion& shardVersion) {
- unique_ptr<ChunkVersion> temp(new ChunkVersion);
- shardVersion.cloneTo(temp.get());
- _shardVersion.reset(temp.release());
- }
+bool BatchedRequestMetadata::isShardNameSet() const {
+ return _isShardNameSet;
+}
- void BatchedRequestMetadata::unsetShardVersion() {
- _shardVersion.reset();
- }
+const string& BatchedRequestMetadata::getShardName() const {
+ dassert(_isShardNameSet);
+ return _shardName;
+}
- bool BatchedRequestMetadata::isShardVersionSet() const {
- return _shardVersion.get() != NULL;
- }
+void BatchedRequestMetadata::setShardVersion(const ChunkVersion& shardVersion) {
+ unique_ptr<ChunkVersion> temp(new ChunkVersion);
+ shardVersion.cloneTo(temp.get());
+ _shardVersion.reset(temp.release());
+}
- const ChunkVersion& BatchedRequestMetadata::getShardVersion() const {
- dassert(_shardVersion.get());
- return *_shardVersion;
- }
+void BatchedRequestMetadata::unsetShardVersion() {
+ _shardVersion.reset();
+}
- void BatchedRequestMetadata::setSession(long long session) {
- _session = session;
- _isSessionSet = true;
- }
+bool BatchedRequestMetadata::isShardVersionSet() const {
+ return _shardVersion.get() != NULL;
+}
- void BatchedRequestMetadata::unsetSession() {
- _isSessionSet = false;
- }
+const ChunkVersion& BatchedRequestMetadata::getShardVersion() const {
+ dassert(_shardVersion.get());
+ return *_shardVersion;
+}
- bool BatchedRequestMetadata::isSessionSet() const {
- return _isSessionSet;
- }
+void BatchedRequestMetadata::setSession(long long session) {
+ _session = session;
+ _isSessionSet = true;
+}
- long long BatchedRequestMetadata::getSession() const {
- dassert(_isSessionSet);
- return _session;
- }
+void BatchedRequestMetadata::unsetSession() {
+ _isSessionSet = false;
+}
+
+bool BatchedRequestMetadata::isSessionSet() const {
+ return _isSessionSet;
+}
+
+long long BatchedRequestMetadata::getSession() const {
+ dassert(_isSessionSet);
+ return _session;
+}
}
diff --git a/src/mongo/s/write_ops/batched_request_metadata.h b/src/mongo/s/write_ops/batched_request_metadata.h
index afe639adfaa..6e31cb60713 100644
--- a/src/mongo/s/write_ops/batched_request_metadata.h
+++ b/src/mongo/s/write_ops/batched_request_metadata.h
@@ -36,59 +36,58 @@
#include "mongo/s/chunk_version.h"
namespace mongo {
- class BatchedRequestMetadata : public BSONSerializable {
- MONGO_DISALLOW_COPYING(BatchedRequestMetadata);
- public:
-
- static const BSONField<std::string> shardName;
- static const BSONField<ChunkVersion> shardVersion;
- static const BSONField<long long> session;
-
- BatchedRequestMetadata();
- virtual ~BatchedRequestMetadata();
-
- //
- // bson serializable interface implementation
- //
-
- virtual bool isValid(std::string* errMsg) const;
- virtual BSONObj toBSON() const;
- virtual bool parseBSON(const BSONObj& source, std::string* errMsg);
- virtual void clear();
- virtual std::string toString() const;
-
- void cloneTo(BatchedRequestMetadata* other) const;
-
- //
- // individual field accessors
- //
-
- void setShardName(StringData shardName);
- void unsetShardName();
- bool isShardNameSet() const;
- const std::string& getShardName() const;
-
- void setShardVersion(const ChunkVersion& shardVersion);
- void unsetShardVersion();
- bool isShardVersionSet() const;
- const ChunkVersion& getShardVersion() const;
-
- void setSession(long long session);
- void unsetSession();
- bool isSessionSet() const;
- long long getSession() const;
-
- private:
-
- // (O) shard name we're sending this batch to
- std::string _shardName;
- bool _isShardNameSet;
-
- // (O) version for this collection on a given shard
- std::unique_ptr<ChunkVersion> _shardVersion;
-
- // (O) session number the inserts belong to
- long long _session;
- bool _isSessionSet;
- };
+class BatchedRequestMetadata : public BSONSerializable {
+ MONGO_DISALLOW_COPYING(BatchedRequestMetadata);
+
+public:
+ static const BSONField<std::string> shardName;
+ static const BSONField<ChunkVersion> shardVersion;
+ static const BSONField<long long> session;
+
+ BatchedRequestMetadata();
+ virtual ~BatchedRequestMetadata();
+
+ //
+ // bson serializable interface implementation
+ //
+
+ virtual bool isValid(std::string* errMsg) const;
+ virtual BSONObj toBSON() const;
+ virtual bool parseBSON(const BSONObj& source, std::string* errMsg);
+ virtual void clear();
+ virtual std::string toString() const;
+
+ void cloneTo(BatchedRequestMetadata* other) const;
+
+ //
+ // individual field accessors
+ //
+
+ void setShardName(StringData shardName);
+ void unsetShardName();
+ bool isShardNameSet() const;
+ const std::string& getShardName() const;
+
+ void setShardVersion(const ChunkVersion& shardVersion);
+ void unsetShardVersion();
+ bool isShardVersionSet() const;
+ const ChunkVersion& getShardVersion() const;
+
+ void setSession(long long session);
+ void unsetSession();
+ bool isSessionSet() const;
+ long long getSession() const;
+
+private:
+ // (O) shard name we're sending this batch to
+ std::string _shardName;
+ bool _isShardNameSet;
+
+ // (O) version for this collection on a given shard
+ std::unique_ptr<ChunkVersion> _shardVersion;
+
+ // (O) session number the inserts belong to
+ long long _session;
+ bool _isSessionSet;
+};
}
diff --git a/src/mongo/s/write_ops/batched_request_metadata_test.cpp b/src/mongo/s/write_ops/batched_request_metadata_test.cpp
index 562497b5d27..e0ca03231cc 100644
--- a/src/mongo/s/write_ops/batched_request_metadata_test.cpp
+++ b/src/mongo/s/write_ops/batched_request_metadata_test.cpp
@@ -22,32 +22,32 @@
namespace {
- using mongo::BSONArray;
- using mongo::BSONArrayBuilder;
- using mongo::BSONObj;
- using mongo::BatchedRequestMetadata;
- using mongo::OID;
- using mongo::Timestamp;
- using std::string;
-
- TEST(RoundTrip, Normal) {
- // The BSON_ARRAY macro doesn't support Timestamps.
- BSONArrayBuilder arrBuilder;
- arrBuilder.append(Timestamp(1,1));
- arrBuilder.append(OID::gen());
- BSONArray shardVersionArray = arrBuilder.arr();
-
- BSONObj metadataObj(BSON(BatchedRequestMetadata::shardName("shard0000") <<
- BatchedRequestMetadata::shardVersion() << shardVersionArray <<
- BatchedRequestMetadata::session(100)));
-
- string errMsg;
- BatchedRequestMetadata metadata;
- bool ok = metadata.parseBSON(metadataObj, &errMsg);
- ASSERT_TRUE(ok);
-
- BSONObj genMetadataObj = metadata.toBSON();
- ASSERT_EQUALS(0, genMetadataObj.woCompare(metadataObj));
- }
-
-} // unnamed namespace
+using mongo::BSONArray;
+using mongo::BSONArrayBuilder;
+using mongo::BSONObj;
+using mongo::BatchedRequestMetadata;
+using mongo::OID;
+using mongo::Timestamp;
+using std::string;
+
+TEST(RoundTrip, Normal) {
+ // The BSON_ARRAY macro doesn't support Timestamps.
+ BSONArrayBuilder arrBuilder;
+ arrBuilder.append(Timestamp(1, 1));
+ arrBuilder.append(OID::gen());
+ BSONArray shardVersionArray = arrBuilder.arr();
+
+ BSONObj metadataObj(BSON(BatchedRequestMetadata::shardName("shard0000")
+ << BatchedRequestMetadata::shardVersion() << shardVersionArray
+ << BatchedRequestMetadata::session(100)));
+
+ string errMsg;
+ BatchedRequestMetadata metadata;
+ bool ok = metadata.parseBSON(metadataObj, &errMsg);
+ ASSERT_TRUE(ok);
+
+ BSONObj genMetadataObj = metadata.toBSON();
+ ASSERT_EQUALS(0, genMetadataObj.woCompare(metadataObj));
+}
+
+} // unnamed namespace
diff --git a/src/mongo/s/write_ops/batched_update_document.cpp b/src/mongo/s/write_ops/batched_update_document.cpp
index ffebede8787..7e388a78b32 100644
--- a/src/mongo/s/write_ops/batched_update_document.cpp
+++ b/src/mongo/s/write_ops/batched_update_document.cpp
@@ -33,207 +33,209 @@
namespace mongo {
- using std::string;
+using std::string;
- using mongoutils::str::stream;
+using mongoutils::str::stream;
- const BSONField<BSONObj> BatchedUpdateDocument::query("q");
- const BSONField<BSONObj> BatchedUpdateDocument::updateExpr("u");
- const BSONField<bool> BatchedUpdateDocument::multi("multi", false);
- const BSONField<bool> BatchedUpdateDocument::upsert("upsert", false);
+const BSONField<BSONObj> BatchedUpdateDocument::query("q");
+const BSONField<BSONObj> BatchedUpdateDocument::updateExpr("u");
+const BSONField<bool> BatchedUpdateDocument::multi("multi", false);
+const BSONField<bool> BatchedUpdateDocument::upsert("upsert", false);
- BatchedUpdateDocument::BatchedUpdateDocument() {
- clear();
+BatchedUpdateDocument::BatchedUpdateDocument() {
+ clear();
+}
+
+BatchedUpdateDocument::~BatchedUpdateDocument() {}
+
+bool BatchedUpdateDocument::isValid(std::string* errMsg) const {
+ std::string dummy;
+ if (errMsg == NULL) {
+ errMsg = &dummy;
}
- BatchedUpdateDocument::~BatchedUpdateDocument() {
+ // All the mandatory fields must be present.
+ if (!_isQuerySet) {
+ *errMsg = stream() << "missing " << query.name() << " field";
+ return false;
}
- bool BatchedUpdateDocument::isValid(std::string* errMsg) const {
- std::string dummy;
- if (errMsg == NULL) {
- errMsg = &dummy;
- }
+ if (!_isUpdateExprSet) {
+ *errMsg = stream() << "missing " << updateExpr.name() << " field";
+ return false;
+ }
- // All the mandatory fields must be present.
- if (!_isQuerySet) {
- *errMsg = stream() << "missing " << query.name() << " field";
- return false;
- }
+ return true;
+}
- if (!_isUpdateExprSet) {
- *errMsg = stream() << "missing " << updateExpr.name() << " field";
- return false;
- }
+BSONObj BatchedUpdateDocument::toBSON() const {
+ BSONObjBuilder builder;
- return true;
- }
+ if (_isQuerySet)
+ builder.append(query(), _query);
- BSONObj BatchedUpdateDocument::toBSON() const {
- BSONObjBuilder builder;
+ if (_isUpdateExprSet)
+ builder.append(updateExpr(), _updateExpr);
- if (_isQuerySet) builder.append(query(), _query);
+ if (_isMultiSet)
+ builder.append(multi(), _multi);
- if (_isUpdateExprSet) builder.append(updateExpr(), _updateExpr);
+ if (_isUpsertSet)
+ builder.append(upsert(), _upsert);
- if (_isMultiSet) builder.append(multi(), _multi);
+ return builder.obj();
+}
- if (_isUpsertSet) builder.append(upsert(), _upsert);
+bool BatchedUpdateDocument::parseBSON(const BSONObj& source, string* errMsg) {
+ clear();
- return builder.obj();
- }
+ std::string dummy;
+ if (!errMsg)
+ errMsg = &dummy;
- bool BatchedUpdateDocument::parseBSON(const BSONObj& source, string* errMsg) {
- clear();
-
- std::string dummy;
- if (!errMsg) errMsg = &dummy;
-
- FieldParser::FieldState fieldState;
-
- BSONObjIterator it(source);
- while ( it.more() ) {
- BSONElement elem = it.next();
- StringData fieldName = elem.fieldNameStringData();
-
- if ( fieldName == query.name() ) {
- fieldState = FieldParser::extract(elem, query, &_query, errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) return false;
- _isQuerySet = fieldState == FieldParser::FIELD_SET;
- }
- else if ( fieldName == updateExpr.name() ) {
- fieldState = FieldParser::extract(elem, updateExpr, &_updateExpr, errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) return false;
- _isUpdateExprSet = fieldState == FieldParser::FIELD_SET;
- }
- else if ( fieldName == multi.name() ) {
- fieldState = FieldParser::extract(elem, multi, &_multi, errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) return false;
- _isMultiSet = fieldState == FieldParser::FIELD_SET;
- }
- else if ( fieldName == upsert.name() ) {
- fieldState = FieldParser::extract(elem, upsert, &_upsert, errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) return false;
- _isUpsertSet = fieldState == FieldParser::FIELD_SET;
- }
- }
+ FieldParser::FieldState fieldState;
- return true;
+ BSONObjIterator it(source);
+ while (it.more()) {
+ BSONElement elem = it.next();
+ StringData fieldName = elem.fieldNameStringData();
+
+ if (fieldName == query.name()) {
+ fieldState = FieldParser::extract(elem, query, &_query, errMsg);
+ if (fieldState == FieldParser::FIELD_INVALID)
+ return false;
+ _isQuerySet = fieldState == FieldParser::FIELD_SET;
+ } else if (fieldName == updateExpr.name()) {
+ fieldState = FieldParser::extract(elem, updateExpr, &_updateExpr, errMsg);
+ if (fieldState == FieldParser::FIELD_INVALID)
+ return false;
+ _isUpdateExprSet = fieldState == FieldParser::FIELD_SET;
+ } else if (fieldName == multi.name()) {
+ fieldState = FieldParser::extract(elem, multi, &_multi, errMsg);
+ if (fieldState == FieldParser::FIELD_INVALID)
+ return false;
+ _isMultiSet = fieldState == FieldParser::FIELD_SET;
+ } else if (fieldName == upsert.name()) {
+ fieldState = FieldParser::extract(elem, upsert, &_upsert, errMsg);
+ if (fieldState == FieldParser::FIELD_INVALID)
+ return false;
+ _isUpsertSet = fieldState == FieldParser::FIELD_SET;
+ }
}
- void BatchedUpdateDocument::clear() {
- _query = BSONObj();
- _isQuerySet = false;
+ return true;
+}
- _updateExpr = BSONObj();
- _isUpdateExprSet = false;
+void BatchedUpdateDocument::clear() {
+ _query = BSONObj();
+ _isQuerySet = false;
- _multi = false;
- _isMultiSet = false;
+ _updateExpr = BSONObj();
+ _isUpdateExprSet = false;
- _upsert = false;
- _isUpsertSet = false;
+ _multi = false;
+ _isMultiSet = false;
- }
+ _upsert = false;
+ _isUpsertSet = false;
+}
- void BatchedUpdateDocument::cloneTo(BatchedUpdateDocument* other) const {
- other->clear();
+void BatchedUpdateDocument::cloneTo(BatchedUpdateDocument* other) const {
+ other->clear();
- other->_query = _query;
- other->_isQuerySet = _isQuerySet;
+ other->_query = _query;
+ other->_isQuerySet = _isQuerySet;
- other->_updateExpr = _updateExpr;
- other->_isUpdateExprSet = _isUpdateExprSet;
+ other->_updateExpr = _updateExpr;
+ other->_isUpdateExprSet = _isUpdateExprSet;
- other->_multi = _multi;
- other->_isMultiSet = _isMultiSet;
+ other->_multi = _multi;
+ other->_isMultiSet = _isMultiSet;
- other->_upsert = _upsert;
- other->_isUpsertSet = _isUpsertSet;
- }
+ other->_upsert = _upsert;
+ other->_isUpsertSet = _isUpsertSet;
+}
- std::string BatchedUpdateDocument::toString() const {
- return toBSON().toString();
- }
+std::string BatchedUpdateDocument::toString() const {
+ return toBSON().toString();
+}
- void BatchedUpdateDocument::setQuery(const BSONObj& query) {
- _query = query.getOwned();
- _isQuerySet = true;
- }
+void BatchedUpdateDocument::setQuery(const BSONObj& query) {
+ _query = query.getOwned();
+ _isQuerySet = true;
+}
- void BatchedUpdateDocument::unsetQuery() {
- _isQuerySet = false;
- }
+void BatchedUpdateDocument::unsetQuery() {
+ _isQuerySet = false;
+}
- bool BatchedUpdateDocument::isQuerySet() const {
- return _isQuerySet;
- }
+bool BatchedUpdateDocument::isQuerySet() const {
+ return _isQuerySet;
+}
- const BSONObj& BatchedUpdateDocument::getQuery() const {
- dassert(_isQuerySet);
- return _query;
- }
+const BSONObj& BatchedUpdateDocument::getQuery() const {
+ dassert(_isQuerySet);
+ return _query;
+}
- void BatchedUpdateDocument::setUpdateExpr(const BSONObj& updateExpr) {
- _updateExpr = updateExpr.getOwned();
- _isUpdateExprSet = true;
- }
+void BatchedUpdateDocument::setUpdateExpr(const BSONObj& updateExpr) {
+ _updateExpr = updateExpr.getOwned();
+ _isUpdateExprSet = true;
+}
- void BatchedUpdateDocument::unsetUpdateExpr() {
- _isUpdateExprSet = false;
- }
+void BatchedUpdateDocument::unsetUpdateExpr() {
+ _isUpdateExprSet = false;
+}
- bool BatchedUpdateDocument::isUpdateExprSet() const {
- return _isUpdateExprSet;
- }
+bool BatchedUpdateDocument::isUpdateExprSet() const {
+ return _isUpdateExprSet;
+}
- const BSONObj& BatchedUpdateDocument::getUpdateExpr() const {
- dassert(_isUpdateExprSet);
- return _updateExpr;
- }
+const BSONObj& BatchedUpdateDocument::getUpdateExpr() const {
+ dassert(_isUpdateExprSet);
+ return _updateExpr;
+}
- void BatchedUpdateDocument::setMulti(bool multi) {
- _multi = multi;
- _isMultiSet = true;
- }
+void BatchedUpdateDocument::setMulti(bool multi) {
+ _multi = multi;
+ _isMultiSet = true;
+}
- void BatchedUpdateDocument::unsetMulti() {
- _isMultiSet = false;
- }
+void BatchedUpdateDocument::unsetMulti() {
+ _isMultiSet = false;
+}
- bool BatchedUpdateDocument::isMultiSet() const {
- return _isMultiSet;
- }
+bool BatchedUpdateDocument::isMultiSet() const {
+ return _isMultiSet;
+}
- bool BatchedUpdateDocument::getMulti() const {
- if (_isMultiSet) {
- return _multi;
- }
- else {
- return multi.getDefault();
- }
+bool BatchedUpdateDocument::getMulti() const {
+ if (_isMultiSet) {
+ return _multi;
+ } else {
+ return multi.getDefault();
}
+}
- void BatchedUpdateDocument::setUpsert(bool upsert) {
- _upsert = upsert;
- _isUpsertSet = true;
- }
+void BatchedUpdateDocument::setUpsert(bool upsert) {
+ _upsert = upsert;
+ _isUpsertSet = true;
+}
- void BatchedUpdateDocument::unsetUpsert() {
- _isUpsertSet = false;
- }
+void BatchedUpdateDocument::unsetUpsert() {
+ _isUpsertSet = false;
+}
- bool BatchedUpdateDocument::isUpsertSet() const {
- return _isUpsertSet;
- }
+bool BatchedUpdateDocument::isUpsertSet() const {
+ return _isUpsertSet;
+}
- bool BatchedUpdateDocument::getUpsert() const {
- if (_isUpsertSet) {
- return _upsert;
- }
- else {
- return upsert.getDefault();
- }
+bool BatchedUpdateDocument::getUpsert() const {
+ if (_isUpsertSet) {
+ return _upsert;
+ } else {
+ return upsert.getDefault();
}
+}
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/s/write_ops/batched_update_document.h b/src/mongo/s/write_ops/batched_update_document.h
index 0830e6c7f3c..ed9b6bc275b 100644
--- a/src/mongo/s/write_ops/batched_update_document.h
+++ b/src/mongo/s/write_ops/batched_update_document.h
@@ -37,85 +37,85 @@
namespace mongo {
- /**
- * This class represents the layout and content of a update document runCommand,
- * in the request side.
- */
- class BatchedUpdateDocument : public BSONSerializable {
- MONGO_DISALLOW_COPYING(BatchedUpdateDocument);
- public:
-
- //
- // schema declarations
- //
-
- static const BSONField<BSONObj> query;
- static const BSONField<BSONObj> updateExpr;
- static const BSONField<bool> multi;
- static const BSONField<bool> upsert;
-
- //
- // construction / destruction
- //
-
- BatchedUpdateDocument();
- virtual ~BatchedUpdateDocument();
-
- /** Copies all the fields present in 'this' to 'other'. */
- void cloneTo(BatchedUpdateDocument* other) const;
-
- //
- // bson serializable interface implementation
- //
-
- virtual bool isValid(std::string* errMsg) const;
- virtual BSONObj toBSON() const;
- virtual bool parseBSON(const BSONObj& source, std::string* errMsg);
- virtual void clear();
- virtual std::string toString() const;
-
- //
- // individual field accessors
- //
-
- void setQuery(const BSONObj& query);
- void unsetQuery();
- bool isQuerySet() const;
- const BSONObj& getQuery() const;
-
- void setUpdateExpr(const BSONObj& updateExpr);
- void unsetUpdateExpr();
- bool isUpdateExprSet() const;
- const BSONObj& getUpdateExpr() const;
-
- void setMulti(bool multi);
- void unsetMulti();
- bool isMultiSet() const;
- bool getMulti() const;
-
- void setUpsert(bool upsert);
- void unsetUpsert();
- bool isUpsertSet() const;
- bool getUpsert() const;
-
- private:
- // Convention: (M)andatory, (O)ptional
-
- // (M) query whose result the update will manipulate
- BSONObj _query;
- bool _isQuerySet;
-
- // (M) the update expression itself
- BSONObj _updateExpr;
- bool _isUpdateExprSet;
-
- // (O) whether multiple documents are to be updated
- bool _multi;
- bool _isMultiSet;
-
- // (O) whether upserts are allowed
- bool _upsert;
- bool _isUpsertSet;
- };
-
-} // namespace mongo
+/**
+ * This class represents the layout and content of an update document runCommand,
+ * on the request side.
+ */
+class BatchedUpdateDocument : public BSONSerializable {
+ MONGO_DISALLOW_COPYING(BatchedUpdateDocument);
+
+public:
+ //
+ // schema declarations
+ //
+
+ static const BSONField<BSONObj> query;
+ static const BSONField<BSONObj> updateExpr;
+ static const BSONField<bool> multi;
+ static const BSONField<bool> upsert;
+
+ //
+ // construction / destruction
+ //
+
+ BatchedUpdateDocument();
+ virtual ~BatchedUpdateDocument();
+
+ /** Copies all the fields present in 'this' to 'other'. */
+ void cloneTo(BatchedUpdateDocument* other) const;
+
+ //
+ // bson serializable interface implementation
+ //
+
+ virtual bool isValid(std::string* errMsg) const;
+ virtual BSONObj toBSON() const;
+ virtual bool parseBSON(const BSONObj& source, std::string* errMsg);
+ virtual void clear();
+ virtual std::string toString() const;
+
+ //
+ // individual field accessors
+ //
+
+ void setQuery(const BSONObj& query);
+ void unsetQuery();
+ bool isQuerySet() const;
+ const BSONObj& getQuery() const;
+
+ void setUpdateExpr(const BSONObj& updateExpr);
+ void unsetUpdateExpr();
+ bool isUpdateExprSet() const;
+ const BSONObj& getUpdateExpr() const;
+
+ void setMulti(bool multi);
+ void unsetMulti();
+ bool isMultiSet() const;
+ bool getMulti() const;
+
+ void setUpsert(bool upsert);
+ void unsetUpsert();
+ bool isUpsertSet() const;
+ bool getUpsert() const;
+
+private:
+ // Convention: (M)andatory, (O)ptional
+
+ // (M) query whose result the update will manipulate
+ BSONObj _query;
+ bool _isQuerySet;
+
+ // (M) the update expression itself
+ BSONObj _updateExpr;
+ bool _isUpdateExprSet;
+
+ // (O) whether multiple documents are to be updated
+ bool _multi;
+ bool _isMultiSet;
+
+ // (O) whether upserts are allowed
+ bool _upsert;
+ bool _isUpsertSet;
+};
+
+} // namespace mongo
diff --git a/src/mongo/s/write_ops/batched_update_request.cpp b/src/mongo/s/write_ops/batched_update_request.cpp
index f2dda0be1de..0994cb04c6f 100644
--- a/src/mongo/s/write_ops/batched_update_request.cpp
+++ b/src/mongo/s/write_ops/batched_update_request.cpp
@@ -34,299 +34,303 @@
namespace mongo {
- using std::unique_ptr;
- using std::string;
-
- using mongoutils::str::stream;
-
- const std::string BatchedUpdateRequest::BATCHED_UPDATE_REQUEST = "update";
- const BSONField<std::string> BatchedUpdateRequest::collName("update");
- const BSONField<std::vector<BatchedUpdateDocument*> > BatchedUpdateRequest::updates("updates");
- const BSONField<BSONObj> BatchedUpdateRequest::writeConcern("writeConcern");
- const BSONField<bool> BatchedUpdateRequest::ordered("ordered", true);
- const BSONField<BSONObj> BatchedUpdateRequest::metadata("metadata");
-
- BatchedUpdateRequest::BatchedUpdateRequest() {
- clear();
+using std::unique_ptr;
+using std::string;
+
+using mongoutils::str::stream;
+
+const std::string BatchedUpdateRequest::BATCHED_UPDATE_REQUEST = "update";
+const BSONField<std::string> BatchedUpdateRequest::collName("update");
+const BSONField<std::vector<BatchedUpdateDocument*>> BatchedUpdateRequest::updates("updates");
+const BSONField<BSONObj> BatchedUpdateRequest::writeConcern("writeConcern");
+const BSONField<bool> BatchedUpdateRequest::ordered("ordered", true);
+const BSONField<BSONObj> BatchedUpdateRequest::metadata("metadata");
+
+BatchedUpdateRequest::BatchedUpdateRequest() {
+ clear();
+}
+
+BatchedUpdateRequest::~BatchedUpdateRequest() {
+ unsetUpdates();
+}
+
+bool BatchedUpdateRequest::isValid(std::string* errMsg) const {
+ std::string dummy;
+ if (errMsg == NULL) {
+ errMsg = &dummy;
}
- BatchedUpdateRequest::~BatchedUpdateRequest() {
- unsetUpdates();
+ // All the mandatory fields must be present.
+ if (!_isCollNameSet) {
+ *errMsg = stream() << "missing " << collName.name() << " field";
+ return false;
}
- bool BatchedUpdateRequest::isValid(std::string* errMsg) const {
- std::string dummy;
- if (errMsg == NULL) {
- errMsg = &dummy;
- }
-
- // All the mandatory fields must be present.
- if (!_isCollNameSet) {
- *errMsg = stream() << "missing " << collName.name() << " field";
- return false;
- }
-
- if (!_isUpdatesSet) {
- *errMsg = stream() << "missing " << updates.name() << " field";
- return false;
- }
-
- return true;
+ if (!_isUpdatesSet) {
+ *errMsg = stream() << "missing " << updates.name() << " field";
+ return false;
}
- BSONObj BatchedUpdateRequest::toBSON() const {
- BSONObjBuilder builder;
+ return true;
+}
- if (_isCollNameSet) builder.append(collName(), _collName);
+BSONObj BatchedUpdateRequest::toBSON() const {
+ BSONObjBuilder builder;
- if (_isUpdatesSet) {
- BSONArrayBuilder updatesBuilder(builder.subarrayStart(updates()));
- for (std::vector<BatchedUpdateDocument*>::const_iterator it = _updates.begin();
- it != _updates.end();
- ++it) {
- BSONObj updateDocument = (*it)->toBSON();
- updatesBuilder.append(updateDocument);
- }
- updatesBuilder.done();
- }
-
- if (_isWriteConcernSet) builder.append(writeConcern(), _writeConcern);
-
- if (_isOrderedSet) builder.append(ordered(), _ordered);
-
- if (_metadata) builder.append(metadata(), _metadata->toBSON());
-
- if (_shouldBypassValidation) builder.append(bypassDocumentValidationCommandOption(), true);
+ if (_isCollNameSet)
+ builder.append(collName(), _collName);
- return builder.obj();
+ if (_isUpdatesSet) {
+ BSONArrayBuilder updatesBuilder(builder.subarrayStart(updates()));
+ for (std::vector<BatchedUpdateDocument*>::const_iterator it = _updates.begin();
+ it != _updates.end();
+ ++it) {
+ BSONObj updateDocument = (*it)->toBSON();
+ updatesBuilder.append(updateDocument);
+ }
+ updatesBuilder.done();
}
- bool BatchedUpdateRequest::parseBSON(const BSONObj& source, string* errMsg) {
- clear();
-
- std::string dummy;
- if (!errMsg) errMsg = &dummy;
-
- FieldParser::FieldState fieldState;
-
- BSONObjIterator it( source );
- while ( it.more() ) {
- const BSONElement& elem = it.next();
- StringData fieldName = elem.fieldNameStringData();
-
- if ( fieldName == collName.name() ) {
- std::string collNameTemp;
- fieldState = FieldParser::extract(elem, collName, &collNameTemp, errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) return false;
- _collName = NamespaceString(collNameTemp);
- _isCollNameSet = fieldState == FieldParser::FIELD_SET;
- }
- else if ( fieldName == updates.name() ) {
- fieldState = FieldParser::extract(elem, updates, &_updates, errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) return false;
- _isUpdatesSet = fieldState == FieldParser::FIELD_SET;
- }
- else if ( fieldName == writeConcern.name() ) {
- fieldState = FieldParser::extract(elem, writeConcern, &_writeConcern, errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) return false;
- _isWriteConcernSet = fieldState == FieldParser::FIELD_SET;
- }
- else if ( fieldName == ordered.name() ) {
- fieldState = FieldParser::extract(elem, ordered, &_ordered, errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) return false;
- _isOrderedSet = fieldState == FieldParser::FIELD_SET;
- }
- else if ( fieldName == metadata.name() ) {
- BSONObj metadataObj;
- fieldState = FieldParser::extract(elem, metadata, &metadataObj, errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) return false;
-
- if (!metadataObj.isEmpty()) {
- _metadata.reset(new BatchedRequestMetadata());
- if (!_metadata->parseBSON(metadataObj, errMsg)) {
- return false;
- }
+ if (_isWriteConcernSet)
+ builder.append(writeConcern(), _writeConcern);
+
+ if (_isOrderedSet)
+ builder.append(ordered(), _ordered);
+
+ if (_metadata)
+ builder.append(metadata(), _metadata->toBSON());
+
+ if (_shouldBypassValidation)
+ builder.append(bypassDocumentValidationCommandOption(), true);
+
+ return builder.obj();
+}
+
+bool BatchedUpdateRequest::parseBSON(const BSONObj& source, string* errMsg) {
+ clear();
+
+ std::string dummy;
+ if (!errMsg)
+ errMsg = &dummy;
+
+ FieldParser::FieldState fieldState;
+
+ BSONObjIterator it(source);
+ while (it.more()) {
+ const BSONElement& elem = it.next();
+ StringData fieldName = elem.fieldNameStringData();
+
+ if (fieldName == collName.name()) {
+ std::string collNameTemp;
+ fieldState = FieldParser::extract(elem, collName, &collNameTemp, errMsg);
+ if (fieldState == FieldParser::FIELD_INVALID)
+ return false;
+ _collName = NamespaceString(collNameTemp);
+ _isCollNameSet = fieldState == FieldParser::FIELD_SET;
+ } else if (fieldName == updates.name()) {
+ fieldState = FieldParser::extract(elem, updates, &_updates, errMsg);
+ if (fieldState == FieldParser::FIELD_INVALID)
+ return false;
+ _isUpdatesSet = fieldState == FieldParser::FIELD_SET;
+ } else if (fieldName == writeConcern.name()) {
+ fieldState = FieldParser::extract(elem, writeConcern, &_writeConcern, errMsg);
+ if (fieldState == FieldParser::FIELD_INVALID)
+ return false;
+ _isWriteConcernSet = fieldState == FieldParser::FIELD_SET;
+ } else if (fieldName == ordered.name()) {
+ fieldState = FieldParser::extract(elem, ordered, &_ordered, errMsg);
+ if (fieldState == FieldParser::FIELD_INVALID)
+ return false;
+ _isOrderedSet = fieldState == FieldParser::FIELD_SET;
+ } else if (fieldName == metadata.name()) {
+ BSONObj metadataObj;
+ fieldState = FieldParser::extract(elem, metadata, &metadataObj, errMsg);
+ if (fieldState == FieldParser::FIELD_INVALID)
+ return false;
+
+ if (!metadataObj.isEmpty()) {
+ _metadata.reset(new BatchedRequestMetadata());
+ if (!_metadata->parseBSON(metadataObj, errMsg)) {
+ return false;
}
}
- else if (fieldName == bypassDocumentValidationCommandOption()) {
- _shouldBypassValidation = elem.trueValue();
- }
- }
- return true;
- }
-
- void BatchedUpdateRequest::clear() {
- _collName = NamespaceString();
- _isCollNameSet = false;
-
- unsetUpdates();
-
- _writeConcern = BSONObj();
- _isWriteConcernSet = false;
-
- _ordered = false;
- _isOrderedSet = false;
-
- _shouldBypassValidation = false;
-
- _metadata.reset();
- }
-
- void BatchedUpdateRequest::cloneTo(BatchedUpdateRequest* other) const {
- other->clear();
-
- other->_collName = _collName;
- other->_isCollNameSet = _isCollNameSet;
-
- for(std::vector<BatchedUpdateDocument*>::const_iterator it = _updates.begin();
- it != _updates.end();
- ++it) {
- unique_ptr<BatchedUpdateDocument> tempBatchUpdateDocument(new BatchedUpdateDocument);
- (*it)->cloneTo(tempBatchUpdateDocument.get());
- other->addToUpdates(tempBatchUpdateDocument.release());
- }
- other->_isUpdatesSet = _isUpdatesSet;
-
- other->_writeConcern = _writeConcern;
- other->_isWriteConcernSet = _isWriteConcernSet;
-
- other->_ordered = _ordered;
- other->_isOrderedSet = _isOrderedSet;
-
- if (_metadata) {
- other->_metadata.reset(new BatchedRequestMetadata());
- _metadata->cloneTo(other->_metadata.get());
+ } else if (fieldName == bypassDocumentValidationCommandOption()) {
+ _shouldBypassValidation = elem.trueValue();
}
}
+ return true;
+}
- std::string BatchedUpdateRequest::toString() const {
- return toBSON().toString();
- }
+void BatchedUpdateRequest::clear() {
+ _collName = NamespaceString();
+ _isCollNameSet = false;
- void BatchedUpdateRequest::setCollName(StringData collName) {
- _collName = NamespaceString(collName);
- _isCollNameSet = true;
- }
+ unsetUpdates();
- const std::string& BatchedUpdateRequest::getCollName() const {
- dassert(_isCollNameSet);
- return _collName.ns();
- }
+ _writeConcern = BSONObj();
+ _isWriteConcernSet = false;
- void BatchedUpdateRequest::setCollNameNS(const NamespaceString& collName) {
- _collName = collName;
- _isCollNameSet = true;
- }
+ _ordered = false;
+ _isOrderedSet = false;
- const NamespaceString& BatchedUpdateRequest::getCollNameNS() const {
- dassert(_isCollNameSet);
- return _collName;
- }
+ _shouldBypassValidation = false;
- const NamespaceString& BatchedUpdateRequest::getTargetingNSS() const {
- return getCollNameNS();
- }
+ _metadata.reset();
+}
- void BatchedUpdateRequest::setUpdates(const std::vector<BatchedUpdateDocument*>& updates) {
- unsetUpdates();
- for (std::vector<BatchedUpdateDocument*>::const_iterator it = updates.begin();
- it != updates.end();
- ++it) {
- unique_ptr<BatchedUpdateDocument> tempBatchUpdateDocument(new BatchedUpdateDocument);
- (*it)->cloneTo(tempBatchUpdateDocument.get());
- addToUpdates(tempBatchUpdateDocument.release());
- }
- _isUpdatesSet = updates.size() > 0;
- }
+void BatchedUpdateRequest::cloneTo(BatchedUpdateRequest* other) const {
+ other->clear();
- void BatchedUpdateRequest::addToUpdates(BatchedUpdateDocument* updates) {
- _updates.push_back(updates);
- _isUpdatesSet = true;
- }
+ other->_collName = _collName;
+ other->_isCollNameSet = _isCollNameSet;
- void BatchedUpdateRequest::unsetUpdates() {
- for(std::vector<BatchedUpdateDocument*>::iterator it = _updates.begin();
- it != _updates.end();
- ++it) {
- delete *it;
- }
- _updates.clear();
- _isUpdatesSet = false;
+ for (std::vector<BatchedUpdateDocument*>::const_iterator it = _updates.begin();
+ it != _updates.end();
+ ++it) {
+ unique_ptr<BatchedUpdateDocument> tempBatchUpdateDocument(new BatchedUpdateDocument);
+ (*it)->cloneTo(tempBatchUpdateDocument.get());
+ other->addToUpdates(tempBatchUpdateDocument.release());
}
+ other->_isUpdatesSet = _isUpdatesSet;
- bool BatchedUpdateRequest::isUpdatesSet() const {
- return _isUpdatesSet;
- }
+ other->_writeConcern = _writeConcern;
+ other->_isWriteConcernSet = _isWriteConcernSet;
- size_t BatchedUpdateRequest::sizeUpdates() const {
- return _updates.size();
- }
+ other->_ordered = _ordered;
+ other->_isOrderedSet = _isOrderedSet;
- const std::vector<BatchedUpdateDocument*>& BatchedUpdateRequest::getUpdates() const {
- dassert(_isUpdatesSet);
- return _updates;
+ if (_metadata) {
+ other->_metadata.reset(new BatchedRequestMetadata());
+ _metadata->cloneTo(other->_metadata.get());
}
-
- const BatchedUpdateDocument* BatchedUpdateRequest::getUpdatesAt(size_t pos) const {
- dassert(_isUpdatesSet);
- dassert(_updates.size() > pos);
- return _updates.at(pos);
+}
+
+std::string BatchedUpdateRequest::toString() const {
+ return toBSON().toString();
+}
+
+void BatchedUpdateRequest::setCollName(StringData collName) {
+ _collName = NamespaceString(collName);
+ _isCollNameSet = true;
+}
+
+const std::string& BatchedUpdateRequest::getCollName() const {
+ dassert(_isCollNameSet);
+ return _collName.ns();
+}
+
+void BatchedUpdateRequest::setCollNameNS(const NamespaceString& collName) {
+ _collName = collName;
+ _isCollNameSet = true;
+}
+
+const NamespaceString& BatchedUpdateRequest::getCollNameNS() const {
+ dassert(_isCollNameSet);
+ return _collName;
+}
+
+const NamespaceString& BatchedUpdateRequest::getTargetingNSS() const {
+ return getCollNameNS();
+}
+
+void BatchedUpdateRequest::setUpdates(const std::vector<BatchedUpdateDocument*>& updates) {
+ unsetUpdates();
+ for (std::vector<BatchedUpdateDocument*>::const_iterator it = updates.begin();
+ it != updates.end();
+ ++it) {
+ unique_ptr<BatchedUpdateDocument> tempBatchUpdateDocument(new BatchedUpdateDocument);
+ (*it)->cloneTo(tempBatchUpdateDocument.get());
+ addToUpdates(tempBatchUpdateDocument.release());
}
-
- void BatchedUpdateRequest::setWriteConcern(const BSONObj& writeConcern) {
- _writeConcern = writeConcern.getOwned();
- _isWriteConcernSet = true;
- }
-
- void BatchedUpdateRequest::unsetWriteConcern() {
- _isWriteConcernSet = false;
- }
-
- bool BatchedUpdateRequest::isWriteConcernSet() const {
- return _isWriteConcernSet;
- }
-
- const BSONObj& BatchedUpdateRequest::getWriteConcern() const {
- dassert(_isWriteConcernSet);
- return _writeConcern;
- }
-
- void BatchedUpdateRequest::setOrdered(bool ordered) {
- _ordered = ordered;
- _isOrderedSet = true;
- }
-
- void BatchedUpdateRequest::unsetOrdered() {
- _isOrderedSet = false;
- }
-
- bool BatchedUpdateRequest::isOrderedSet() const {
- return _isOrderedSet;
+ _isUpdatesSet = updates.size() > 0;
+}
+
+void BatchedUpdateRequest::addToUpdates(BatchedUpdateDocument* updates) {
+ _updates.push_back(updates);
+ _isUpdatesSet = true;
+}
+
+void BatchedUpdateRequest::unsetUpdates() {
+ for (std::vector<BatchedUpdateDocument*>::iterator it = _updates.begin(); it != _updates.end();
+ ++it) {
+ delete *it;
}
-
- bool BatchedUpdateRequest::getOrdered() const {
- if (_isOrderedSet) {
- return _ordered;
- }
- else {
- return ordered.getDefault();
- }
+ _updates.clear();
+ _isUpdatesSet = false;
+}
+
+bool BatchedUpdateRequest::isUpdatesSet() const {
+ return _isUpdatesSet;
+}
+
+size_t BatchedUpdateRequest::sizeUpdates() const {
+ return _updates.size();
+}
+
+const std::vector<BatchedUpdateDocument*>& BatchedUpdateRequest::getUpdates() const {
+ dassert(_isUpdatesSet);
+ return _updates;
+}
+
+const BatchedUpdateDocument* BatchedUpdateRequest::getUpdatesAt(size_t pos) const {
+ dassert(_isUpdatesSet);
+ dassert(_updates.size() > pos);
+ return _updates.at(pos);
+}
+
+void BatchedUpdateRequest::setWriteConcern(const BSONObj& writeConcern) {
+ _writeConcern = writeConcern.getOwned();
+ _isWriteConcernSet = true;
+}
+
+void BatchedUpdateRequest::unsetWriteConcern() {
+ _isWriteConcernSet = false;
+}
+
+bool BatchedUpdateRequest::isWriteConcernSet() const {
+ return _isWriteConcernSet;
+}
+
+const BSONObj& BatchedUpdateRequest::getWriteConcern() const {
+ dassert(_isWriteConcernSet);
+ return _writeConcern;
+}
+
+void BatchedUpdateRequest::setOrdered(bool ordered) {
+ _ordered = ordered;
+ _isOrderedSet = true;
+}
+
+void BatchedUpdateRequest::unsetOrdered() {
+ _isOrderedSet = false;
+}
+
+bool BatchedUpdateRequest::isOrderedSet() const {
+ return _isOrderedSet;
+}
+
+bool BatchedUpdateRequest::getOrdered() const {
+ if (_isOrderedSet) {
+ return _ordered;
+ } else {
+ return ordered.getDefault();
}
+}
- void BatchedUpdateRequest::setMetadata(BatchedRequestMetadata* metadata) {
- _metadata.reset(metadata);
- }
+void BatchedUpdateRequest::setMetadata(BatchedRequestMetadata* metadata) {
+ _metadata.reset(metadata);
+}
- void BatchedUpdateRequest::unsetMetadata() {
- _metadata.reset();
- }
+void BatchedUpdateRequest::unsetMetadata() {
+ _metadata.reset();
+}
- bool BatchedUpdateRequest::isMetadataSet() const {
- return _metadata.get();
- }
+bool BatchedUpdateRequest::isMetadataSet() const {
+ return _metadata.get();
+}
- BatchedRequestMetadata* BatchedUpdateRequest::getMetadata() const {
- return _metadata.get();
- }
+BatchedRequestMetadata* BatchedUpdateRequest::getMetadata() const {
+ return _metadata.get();
+}
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/s/write_ops/batched_update_request.h b/src/mongo/s/write_ops/batched_update_request.h
index db99188b7b3..6042f4c6cf2 100644
--- a/src/mongo/s/write_ops/batched_update_request.h
+++ b/src/mongo/s/write_ops/batched_update_request.h
@@ -40,116 +40,120 @@
namespace mongo {
+/**
+ * This class represents the layout and content of a batched update runCommand,
+ * the request side.
+ */
+class BatchedUpdateRequest : public BSONSerializable {
+ MONGO_DISALLOW_COPYING(BatchedUpdateRequest);
+
+public:
+ //
+ // schema declarations
+ //
+
+ // Name used for the batched update invocation.
+ static const std::string BATCHED_UPDATE_REQUEST;
+
+ // Field names and types in the batched update command type.
+ static const BSONField<std::string> collName;
+ static const BSONField<std::vector<BatchedUpdateDocument*>> updates;
+ static const BSONField<BSONObj> writeConcern;
+ static const BSONField<bool> ordered;
+ static const BSONField<BSONObj> metadata;
+
+ //
+ // construction / destruction
+ //
+
+ BatchedUpdateRequest();
+ virtual ~BatchedUpdateRequest();
+
+ /** Copies all the fields present in 'this' to 'other'. */
+ void cloneTo(BatchedUpdateRequest* other) const;
+
+ //
+ // bson serializable interface implementation
+ //
+
+ virtual bool isValid(std::string* errMsg) const;
+ virtual BSONObj toBSON() const;
+ virtual bool parseBSON(const BSONObj& source, std::string* errMsg);
+ virtual void clear();
+ virtual std::string toString() const;
+
+ //
+ // individual field accessors
+ //
+
+ void setCollName(StringData collName);
+ void setCollNameNS(const NamespaceString& collName);
+ const std::string& getCollName() const;
+ const NamespaceString& getCollNameNS() const;
+
+ const NamespaceString& getTargetingNSS() const;
+
+ void setUpdates(const std::vector<BatchedUpdateDocument*>& updates);
+
/**
- * This class represents the layout and content of a batched update runCommand,
- * the request side.
+ * updates ownership is transferred to here.
*/
- class BatchedUpdateRequest : public BSONSerializable {
- MONGO_DISALLOW_COPYING(BatchedUpdateRequest);
- public:
-
- //
- // schema declarations
- //
-
- // Name used for the batched update invocation.
- static const std::string BATCHED_UPDATE_REQUEST;
-
- // Field names and types in the batched update command type.
- static const BSONField<std::string> collName;
- static const BSONField<std::vector<BatchedUpdateDocument*> > updates;
- static const BSONField<BSONObj> writeConcern;
- static const BSONField<bool> ordered;
- static const BSONField<BSONObj> metadata;
-
- //
- // construction / destruction
- //
-
- BatchedUpdateRequest();
- virtual ~BatchedUpdateRequest();
-
- /** Copies all the fields present in 'this' to 'other'. */
- void cloneTo(BatchedUpdateRequest* other) const;
-
- //
- // bson serializable interface implementation
- //
-
- virtual bool isValid(std::string* errMsg) const;
- virtual BSONObj toBSON() const;
- virtual bool parseBSON(const BSONObj& source, std::string* errMsg);
- virtual void clear();
- virtual std::string toString() const;
-
- //
- // individual field accessors
- //
-
- void setCollName(StringData collName);
- void setCollNameNS(const NamespaceString& collName);
- const std::string& getCollName() const;
- const NamespaceString& getCollNameNS() const;
-
- const NamespaceString& getTargetingNSS() const;
-
- void setUpdates(const std::vector<BatchedUpdateDocument*>& updates);
-
- /**
- * updates ownership is transferred to here.
- */
- void addToUpdates(BatchedUpdateDocument* updates);
- void unsetUpdates();
- bool isUpdatesSet() const;
- std::size_t sizeUpdates() const;
- const std::vector<BatchedUpdateDocument*>& getUpdates() const;
- const BatchedUpdateDocument* getUpdatesAt(std::size_t pos) const;
-
- void setWriteConcern(const BSONObj& writeConcern);
- void unsetWriteConcern();
- bool isWriteConcernSet() const;
- const BSONObj& getWriteConcern() const;
-
- void setOrdered(bool ordered);
- void unsetOrdered();
- bool isOrderedSet() const;
- bool getOrdered() const;
-
- void setShouldBypassValidation(bool newVal) { _shouldBypassValidation = newVal; }
- bool shouldBypassValidation() const { return _shouldBypassValidation; }
-
- /*
- * metadata ownership will be transferred to this.
- */
- void setMetadata(BatchedRequestMetadata* metadata);
- void unsetMetadata();
- bool isMetadataSet() const;
- BatchedRequestMetadata* getMetadata() const;
-
- private:
- // Convention: (M)andatory, (O)ptional
-
- // (M) collection we're updating from
- NamespaceString _collName;
- bool _isCollNameSet;
-
- // (M) array of individual updates
- std::vector<BatchedUpdateDocument*> _updates;
- bool _isUpdatesSet;
-
- // (O) to be issued after the batch applied
- BSONObj _writeConcern;
- bool _isWriteConcernSet;
-
- // (O) whether batch is issued in parallel or not
- bool _ordered;
- bool _isOrderedSet;
-
- // (O) should document validation be bypassed (default false)
- bool _shouldBypassValidation;
-
- // (O) metadata associated with this request for internal use.
- std::unique_ptr<BatchedRequestMetadata> _metadata;
- };
-
-} // namespace mongo
+ void addToUpdates(BatchedUpdateDocument* updates);
+ void unsetUpdates();
+ bool isUpdatesSet() const;
+ std::size_t sizeUpdates() const;
+ const std::vector<BatchedUpdateDocument*>& getUpdates() const;
+ const BatchedUpdateDocument* getUpdatesAt(std::size_t pos) const;
+
+ void setWriteConcern(const BSONObj& writeConcern);
+ void unsetWriteConcern();
+ bool isWriteConcernSet() const;
+ const BSONObj& getWriteConcern() const;
+
+ void setOrdered(bool ordered);
+ void unsetOrdered();
+ bool isOrderedSet() const;
+ bool getOrdered() const;
+
+ void setShouldBypassValidation(bool newVal) {
+ _shouldBypassValidation = newVal;
+ }
+ bool shouldBypassValidation() const {
+ return _shouldBypassValidation;
+ }
+
+ /*
+ * metadata ownership will be transferred to this.
+ */
+ void setMetadata(BatchedRequestMetadata* metadata);
+ void unsetMetadata();
+ bool isMetadataSet() const;
+ BatchedRequestMetadata* getMetadata() const;
+
+private:
+ // Convention: (M)andatory, (O)ptional
+
+ // (M) collection we're updating from
+ NamespaceString _collName;
+ bool _isCollNameSet;
+
+ // (M) array of individual updates
+ std::vector<BatchedUpdateDocument*> _updates;
+ bool _isUpdatesSet;
+
+ // (O) to be issued after the batch applied
+ BSONObj _writeConcern;
+ bool _isWriteConcernSet;
+
+ // (O) whether batch is issued in parallel or not
+ bool _ordered;
+ bool _isOrderedSet;
+
+ // (O) should document validation be bypassed (default false)
+ bool _shouldBypassValidation;
+
+ // (O) metadata associated with this request for internal use.
+ std::unique_ptr<BatchedRequestMetadata> _metadata;
+};
+
+} // namespace mongo
diff --git a/src/mongo/s/write_ops/batched_update_request_test.cpp b/src/mongo/s/write_ops/batched_update_request_test.cpp
index c8e69b92b06..662bf079b9f 100644
--- a/src/mongo/s/write_ops/batched_update_request_test.cpp
+++ b/src/mongo/s/write_ops/batched_update_request_test.cpp
@@ -36,56 +36,49 @@
namespace {
- using std::string;
- using mongo::BatchedUpdateDocument;
- using mongo::BatchedUpdateRequest;
- using mongo::BatchedRequestMetadata;
- using mongo::BSONArray;
- using mongo::BSONArrayBuilder;
- using mongo::BSONObj;
- using mongo::OID;
- using mongo::Timestamp;
+using std::string;
+using mongo::BatchedUpdateDocument;
+using mongo::BatchedUpdateRequest;
+using mongo::BatchedRequestMetadata;
+using mongo::BSONArray;
+using mongo::BSONArrayBuilder;
+using mongo::BSONObj;
+using mongo::OID;
+using mongo::Timestamp;
- TEST(RoundTrip, Normal) {
- BSONArray updateArray =
- BSON_ARRAY(
- BSON(BatchedUpdateDocument::query(BSON("a" << 1)) <<
- BatchedUpdateDocument::updateExpr(BSON("$set" << BSON("a" << 1))) <<
- BatchedUpdateDocument::multi(false) <<
- BatchedUpdateDocument::upsert(false)
- ) <<
- BSON(BatchedUpdateDocument::query(BSON("b" << 1)) <<
- BatchedUpdateDocument::updateExpr(BSON("$set" << BSON("b" << 2))) <<
- BatchedUpdateDocument::multi(false) <<
- BatchedUpdateDocument::upsert(false)
- )
- );
+TEST(RoundTrip, Normal) {
+ BSONArray updateArray = BSON_ARRAY(
+ BSON(BatchedUpdateDocument::query(BSON("a" << 1))
+ << BatchedUpdateDocument::updateExpr(BSON("$set" << BSON("a" << 1)))
+ << BatchedUpdateDocument::multi(false) << BatchedUpdateDocument::upsert(false))
+ << BSON(BatchedUpdateDocument::query(BSON("b" << 1))
+ << BatchedUpdateDocument::updateExpr(BSON("$set" << BSON("b" << 2)))
+ << BatchedUpdateDocument::multi(false) << BatchedUpdateDocument::upsert(false)));
- BSONObj writeConcernObj = BSON("w" << 1);
+ BSONObj writeConcernObj = BSON("w" << 1);
- // The BSON_ARRAY macro doesn't support Timestamps.
- BSONArrayBuilder arrBuilder;
- arrBuilder.append(Timestamp(1,1));
- arrBuilder.append(OID::gen());
- BSONArray shardVersionArray = arrBuilder.arr();
+ // The BSON_ARRAY macro doesn't support Timestamps.
+ BSONArrayBuilder arrBuilder;
+ arrBuilder.append(Timestamp(1, 1));
+ arrBuilder.append(OID::gen());
+ BSONArray shardVersionArray = arrBuilder.arr();
- BSONObj origUpdateRequestObj =
- BSON(BatchedUpdateRequest::collName("test") <<
- BatchedUpdateRequest::updates() << updateArray <<
- BatchedUpdateRequest::writeConcern(writeConcernObj) <<
- BatchedUpdateRequest::ordered(true) <<
- BatchedUpdateRequest::metadata() << BSON(
- BatchedRequestMetadata::shardName("shard0000") <<
- BatchedRequestMetadata::shardVersion() << shardVersionArray <<
- BatchedRequestMetadata::session(0)));
+ BSONObj origUpdateRequestObj =
+ BSON(BatchedUpdateRequest::collName("test")
+ << BatchedUpdateRequest::updates() << updateArray
+ << BatchedUpdateRequest::writeConcern(writeConcernObj)
+ << BatchedUpdateRequest::ordered(true) << BatchedUpdateRequest::metadata()
+ << BSON(BatchedRequestMetadata::shardName("shard0000")
+ << BatchedRequestMetadata::shardVersion() << shardVersionArray
+ << BatchedRequestMetadata::session(0)));
- string errMsg;
- BatchedUpdateRequest request;
- bool ok = request.parseBSON(origUpdateRequestObj, &errMsg);
- ASSERT_TRUE(ok);
+ string errMsg;
+ BatchedUpdateRequest request;
+ bool ok = request.parseBSON(origUpdateRequestObj, &errMsg);
+ ASSERT_TRUE(ok);
- BSONObj genUpdateRequestObj = request.toBSON();
- ASSERT_EQUALS(0, genUpdateRequestObj.woCompare(origUpdateRequestObj));
- }
+ BSONObj genUpdateRequestObj = request.toBSON();
+ ASSERT_EQUALS(0, genUpdateRequestObj.woCompare(origUpdateRequestObj));
+}
-} // unnamed namespace
+} // unnamed namespace
diff --git a/src/mongo/s/write_ops/batched_upsert_detail.cpp b/src/mongo/s/write_ops/batched_upsert_detail.cpp
index d6764a22c39..cbc26eadcef 100644
--- a/src/mongo/s/write_ops/batched_upsert_detail.cpp
+++ b/src/mongo/s/write_ops/batched_upsert_detail.cpp
@@ -33,128 +33,130 @@
namespace mongo {
- using std::string;
+using std::string;
- using mongoutils::str::stream;
+using mongoutils::str::stream;
- const BSONField<int> BatchedUpsertDetail::index("index");
- const BSONField<BSONObj> BatchedUpsertDetail::upsertedID("_id");
+const BSONField<int> BatchedUpsertDetail::index("index");
+const BSONField<BSONObj> BatchedUpsertDetail::upsertedID("_id");
- BatchedUpsertDetail::BatchedUpsertDetail() {
- clear();
- }
-
- BatchedUpsertDetail::~BatchedUpsertDetail() {
- }
+BatchedUpsertDetail::BatchedUpsertDetail() {
+ clear();
+}
- bool BatchedUpsertDetail::isValid(std::string* errMsg) const {
- std::string dummy;
- if (errMsg == NULL) {
- errMsg = &dummy;
- }
+BatchedUpsertDetail::~BatchedUpsertDetail() {}
- // All the mandatory fields must be present.
- if (!_isIndexSet) {
- *errMsg = stream() << "missing " << index.name() << " field";
- return false;
- }
+bool BatchedUpsertDetail::isValid(std::string* errMsg) const {
+ std::string dummy;
+ if (errMsg == NULL) {
+ errMsg = &dummy;
+ }
- if (!_isUpsertedIDSet) {
- *errMsg = stream() << "missing " << upsertedID.name() << " field";
- return false;
- }
+ // All the mandatory fields must be present.
+ if (!_isIndexSet) {
+ *errMsg = stream() << "missing " << index.name() << " field";
+ return false;
+ }
- return true;
+ if (!_isUpsertedIDSet) {
+ *errMsg = stream() << "missing " << upsertedID.name() << " field";
+ return false;
}
- BSONObj BatchedUpsertDetail::toBSON() const {
- BSONObjBuilder builder;
+ return true;
+}
- if (_isIndexSet) builder.append(index(), _index);
+BSONObj BatchedUpsertDetail::toBSON() const {
+ BSONObjBuilder builder;
- // We're using the BSONObj to store the _id value.
- if (_isUpsertedIDSet) {
- builder.appendAs(_upsertedID.firstElement(), upsertedID());
- }
+ if (_isIndexSet)
+ builder.append(index(), _index);
- return builder.obj();
+ // We're using the BSONObj to store the _id value.
+ if (_isUpsertedIDSet) {
+ builder.appendAs(_upsertedID.firstElement(), upsertedID());
}
- bool BatchedUpsertDetail::parseBSON(const BSONObj& source, string* errMsg) {
- clear();
+ return builder.obj();
+}
- std::string dummy;
- if (!errMsg) errMsg = &dummy;
+bool BatchedUpsertDetail::parseBSON(const BSONObj& source, string* errMsg) {
+ clear();
- FieldParser::FieldState fieldState;
- fieldState = FieldParser::extract(source, index, &_index, errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) return false;
- _isIndexSet = fieldState == FieldParser::FIELD_SET;
+ std::string dummy;
+ if (!errMsg)
+ errMsg = &dummy;
- fieldState = FieldParser::extractID(source, upsertedID, &_upsertedID, errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) return false;
- _isUpsertedIDSet = fieldState == FieldParser::FIELD_SET;
+ FieldParser::FieldState fieldState;
+ fieldState = FieldParser::extract(source, index, &_index, errMsg);
+ if (fieldState == FieldParser::FIELD_INVALID)
+ return false;
+ _isIndexSet = fieldState == FieldParser::FIELD_SET;
- return true;
- }
+ fieldState = FieldParser::extractID(source, upsertedID, &_upsertedID, errMsg);
+ if (fieldState == FieldParser::FIELD_INVALID)
+ return false;
+ _isUpsertedIDSet = fieldState == FieldParser::FIELD_SET;
- void BatchedUpsertDetail::clear() {
- _index = 0;
- _isIndexSet = false;
+ return true;
+}
- _upsertedID = BSONObj();
- _isUpsertedIDSet = false;
+void BatchedUpsertDetail::clear() {
+ _index = 0;
+ _isIndexSet = false;
- }
+ _upsertedID = BSONObj();
+ _isUpsertedIDSet = false;
+}
- void BatchedUpsertDetail::cloneTo(BatchedUpsertDetail* other) const {
- other->clear();
+void BatchedUpsertDetail::cloneTo(BatchedUpsertDetail* other) const {
+ other->clear();
- other->_index = _index;
- other->_isIndexSet = _isIndexSet;
+ other->_index = _index;
+ other->_isIndexSet = _isIndexSet;
- other->_upsertedID = _upsertedID;
- other->_isUpsertedIDSet = _isUpsertedIDSet;
- }
+ other->_upsertedID = _upsertedID;
+ other->_isUpsertedIDSet = _isUpsertedIDSet;
+}
- std::string BatchedUpsertDetail::toString() const {
- return "implement me";
- }
+std::string BatchedUpsertDetail::toString() const {
+ return "implement me";
+}
- void BatchedUpsertDetail::setIndex(int index) {
- _index = index;
- _isIndexSet = true;
- }
+void BatchedUpsertDetail::setIndex(int index) {
+ _index = index;
+ _isIndexSet = true;
+}
- void BatchedUpsertDetail::unsetIndex() {
- _isIndexSet = false;
- }
+void BatchedUpsertDetail::unsetIndex() {
+ _isIndexSet = false;
+}
- bool BatchedUpsertDetail::isIndexSet() const {
- return _isIndexSet;
- }
+bool BatchedUpsertDetail::isIndexSet() const {
+ return _isIndexSet;
+}
- int BatchedUpsertDetail::getIndex() const {
- dassert(_isIndexSet);
- return _index;
- }
+int BatchedUpsertDetail::getIndex() const {
+ dassert(_isIndexSet);
+ return _index;
+}
- void BatchedUpsertDetail::setUpsertedID(const BSONObj& upsertedID) {
- _upsertedID = upsertedID.firstElement().wrap( "" ).getOwned();
- _isUpsertedIDSet = true;
- }
+void BatchedUpsertDetail::setUpsertedID(const BSONObj& upsertedID) {
+ _upsertedID = upsertedID.firstElement().wrap("").getOwned();
+ _isUpsertedIDSet = true;
+}
- void BatchedUpsertDetail::unsetUpsertedID() {
- _isUpsertedIDSet = false;
- }
+void BatchedUpsertDetail::unsetUpsertedID() {
+ _isUpsertedIDSet = false;
+}
- bool BatchedUpsertDetail::isUpsertedIDSet() const {
- return _isUpsertedIDSet;
- }
+bool BatchedUpsertDetail::isUpsertedIDSet() const {
+ return _isUpsertedIDSet;
+}
- const BSONObj& BatchedUpsertDetail::getUpsertedID() const {
- dassert(_isUpsertedIDSet);
- return _upsertedID;
- }
+const BSONObj& BatchedUpsertDetail::getUpsertedID() const {
+ dassert(_isUpsertedIDSet);
+ return _upsertedID;
+}
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/s/write_ops/batched_upsert_detail.h b/src/mongo/s/write_ops/batched_upsert_detail.h
index cff8122f7ee..0f065ce6984 100644
--- a/src/mongo/s/write_ops/batched_upsert_detail.h
+++ b/src/mongo/s/write_ops/batched_upsert_detail.h
@@ -37,65 +37,65 @@
namespace mongo {
- /**
- * This class represents the layout and content of an idem inside the 'upserted' array
- * of a write command's response (see batched_command_response.h)
- */
- class BatchedUpsertDetail : public BSONSerializable {
- MONGO_DISALLOW_COPYING(BatchedUpsertDetail);
- public:
-
- //
- // schema declarations
- //
-
- static const BSONField<int> index;
- static const BSONField<BSONObj> upsertedID; // ID type
-
- //
- // construction / destruction
- //
-
- BatchedUpsertDetail();
- virtual ~BatchedUpsertDetail();
-
- /** Copies all the fields present in 'this' to 'other'. */
- void cloneTo(BatchedUpsertDetail* other) const;
-
- //
- // bson serializable interface implementation
- //
-
- virtual bool isValid(std::string* errMsg) const;
- virtual BSONObj toBSON() const;
- virtual bool parseBSON(const BSONObj& source, std::string* errMsg);
- virtual void clear();
- virtual std::string toString() const;
-
- //
- // individual field accessors
- //
-
- void setIndex(int index);
- void unsetIndex();
- bool isIndexSet() const;
- int getIndex() const;
-
- void setUpsertedID(const BSONObj& upsertedID);
- void unsetUpsertedID();
- bool isUpsertedIDSet() const;
- const BSONObj& getUpsertedID() const;
-
- private:
- // Convention: (M)andatory, (O)ptional
-
- // (M) number of the batch item the upsert refers to
- int _index;
- bool _isIndexSet;
-
- // (M) _id for the upserted document
- BSONObj _upsertedID;
- bool _isUpsertedIDSet;
- };
-
-} // namespace mongo
+/**
+ * This class represents the layout and content of an idem inside the 'upserted' array
+ * of a write command's response (see batched_command_response.h)
+ */
+class BatchedUpsertDetail : public BSONSerializable {
+ MONGO_DISALLOW_COPYING(BatchedUpsertDetail);
+
+public:
+ //
+ // schema declarations
+ //
+
+ static const BSONField<int> index;
+ static const BSONField<BSONObj> upsertedID; // ID type
+
+ //
+ // construction / destruction
+ //
+
+ BatchedUpsertDetail();
+ virtual ~BatchedUpsertDetail();
+
+ /** Copies all the fields present in 'this' to 'other'. */
+ void cloneTo(BatchedUpsertDetail* other) const;
+
+ //
+ // bson serializable interface implementation
+ //
+
+ virtual bool isValid(std::string* errMsg) const;
+ virtual BSONObj toBSON() const;
+ virtual bool parseBSON(const BSONObj& source, std::string* errMsg);
+ virtual void clear();
+ virtual std::string toString() const;
+
+ //
+ // individual field accessors
+ //
+
+ void setIndex(int index);
+ void unsetIndex();
+ bool isIndexSet() const;
+ int getIndex() const;
+
+ void setUpsertedID(const BSONObj& upsertedID);
+ void unsetUpsertedID();
+ bool isUpsertedIDSet() const;
+ const BSONObj& getUpsertedID() const;
+
+private:
+ // Convention: (M)andatory, (O)ptional
+
+ // (M) number of the batch item the upsert refers to
+ int _index;
+ bool _isIndexSet;
+
+ // (M) _id for the upserted document
+ BSONObj _upsertedID;
+ bool _isUpsertedIDSet;
+};
+
+} // namespace mongo
diff --git a/src/mongo/s/write_ops/wc_error_detail.cpp b/src/mongo/s/write_ops/wc_error_detail.cpp
index 0f7536639ba..40474c819c7 100644
--- a/src/mongo/s/write_ops/wc_error_detail.cpp
+++ b/src/mongo/s/write_ops/wc_error_detail.cpp
@@ -33,150 +33,155 @@
namespace mongo {
- using std::string;
+using std::string;
- using mongoutils::str::stream;
- const BSONField<int> WCErrorDetail::errCode("code");
- const BSONField<BSONObj> WCErrorDetail::errInfo("errInfo");
- const BSONField<std::string> WCErrorDetail::errMessage("errmsg");
+using mongoutils::str::stream;
+const BSONField<int> WCErrorDetail::errCode("code");
+const BSONField<BSONObj> WCErrorDetail::errInfo("errInfo");
+const BSONField<std::string> WCErrorDetail::errMessage("errmsg");
- WCErrorDetail::WCErrorDetail() {
- clear();
- }
-
- WCErrorDetail::~WCErrorDetail() {
- }
+WCErrorDetail::WCErrorDetail() {
+ clear();
+}
- bool WCErrorDetail::isValid(std::string* errMsg) const {
- std::string dummy;
- if (errMsg == NULL) {
- errMsg = &dummy;
- }
+WCErrorDetail::~WCErrorDetail() {}
- // All the mandatory fields must be present.
- if (!_isErrCodeSet) {
- *errMsg = stream() << "missing " << errCode.name() << " field";
- return false;
- }
+bool WCErrorDetail::isValid(std::string* errMsg) const {
+ std::string dummy;
+ if (errMsg == NULL) {
+ errMsg = &dummy;
+ }
- return true;
+ // All the mandatory fields must be present.
+ if (!_isErrCodeSet) {
+ *errMsg = stream() << "missing " << errCode.name() << " field";
+ return false;
}
- BSONObj WCErrorDetail::toBSON() const {
- BSONObjBuilder builder;
+ return true;
+}
- if (_isErrCodeSet) builder.append(errCode(), _errCode);
+BSONObj WCErrorDetail::toBSON() const {
+ BSONObjBuilder builder;
- if (_isErrInfoSet) builder.append(errInfo(), _errInfo);
+ if (_isErrCodeSet)
+ builder.append(errCode(), _errCode);
- if (_isErrMessageSet) builder.append(errMessage(), _errMessage);
+ if (_isErrInfoSet)
+ builder.append(errInfo(), _errInfo);
- return builder.obj();
- }
+ if (_isErrMessageSet)
+ builder.append(errMessage(), _errMessage);
- bool WCErrorDetail::parseBSON(const BSONObj& source, string* errMsg) {
- clear();
+ return builder.obj();
+}
- std::string dummy;
- if (!errMsg) errMsg = &dummy;
+bool WCErrorDetail::parseBSON(const BSONObj& source, string* errMsg) {
+ clear();
- FieldParser::FieldState fieldState;
- fieldState = FieldParser::extract(source, errCode, &_errCode, errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) return false;
- _isErrCodeSet = fieldState == FieldParser::FIELD_SET;
+ std::string dummy;
+ if (!errMsg)
+ errMsg = &dummy;
- fieldState = FieldParser::extract(source, errInfo, &_errInfo, errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) return false;
- _isErrInfoSet = fieldState == FieldParser::FIELD_SET;
+ FieldParser::FieldState fieldState;
+ fieldState = FieldParser::extract(source, errCode, &_errCode, errMsg);
+ if (fieldState == FieldParser::FIELD_INVALID)
+ return false;
+ _isErrCodeSet = fieldState == FieldParser::FIELD_SET;
- fieldState = FieldParser::extract(source, errMessage, &_errMessage, errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) return false;
- _isErrMessageSet = fieldState == FieldParser::FIELD_SET;
+ fieldState = FieldParser::extract(source, errInfo, &_errInfo, errMsg);
+ if (fieldState == FieldParser::FIELD_INVALID)
+ return false;
+ _isErrInfoSet = fieldState == FieldParser::FIELD_SET;
- return true;
- }
+ fieldState = FieldParser::extract(source, errMessage, &_errMessage, errMsg);
+ if (fieldState == FieldParser::FIELD_INVALID)
+ return false;
+ _isErrMessageSet = fieldState == FieldParser::FIELD_SET;
- void WCErrorDetail::clear() {
- _errCode = 0;
- _isErrCodeSet = false;
+ return true;
+}
- _errInfo = BSONObj();
- _isErrInfoSet = false;
+void WCErrorDetail::clear() {
+ _errCode = 0;
+ _isErrCodeSet = false;
- _errMessage.clear();
- _isErrMessageSet = false;
+ _errInfo = BSONObj();
+ _isErrInfoSet = false;
- }
+ _errMessage.clear();
+ _isErrMessageSet = false;
+}
- void WCErrorDetail::cloneTo(WCErrorDetail* other) const {
- other->clear();
+void WCErrorDetail::cloneTo(WCErrorDetail* other) const {
+ other->clear();
- other->_errCode = _errCode;
- other->_isErrCodeSet = _isErrCodeSet;
+ other->_errCode = _errCode;
+ other->_isErrCodeSet = _isErrCodeSet;
- other->_errInfo = _errInfo;
- other->_isErrInfoSet = _isErrInfoSet;
+ other->_errInfo = _errInfo;
+ other->_isErrInfoSet = _isErrInfoSet;
- other->_errMessage = _errMessage;
- other->_isErrMessageSet = _isErrMessageSet;
- }
+ other->_errMessage = _errMessage;
+ other->_isErrMessageSet = _isErrMessageSet;
+}
- std::string WCErrorDetail::toString() const {
- return "implement me";
- }
+std::string WCErrorDetail::toString() const {
+ return "implement me";
+}
- void WCErrorDetail::setErrCode(int errCode) {
- _errCode = errCode;
- _isErrCodeSet = true;
- }
+void WCErrorDetail::setErrCode(int errCode) {
+ _errCode = errCode;
+ _isErrCodeSet = true;
+}
- void WCErrorDetail::unsetErrCode() {
- _isErrCodeSet = false;
- }
+void WCErrorDetail::unsetErrCode() {
+ _isErrCodeSet = false;
+}
- bool WCErrorDetail::isErrCodeSet() const {
- return _isErrCodeSet;
- }
+bool WCErrorDetail::isErrCodeSet() const {
+ return _isErrCodeSet;
+}
- int WCErrorDetail::getErrCode() const {
- dassert(_isErrCodeSet);
- return _errCode;
- }
+int WCErrorDetail::getErrCode() const {
+ dassert(_isErrCodeSet);
+ return _errCode;
+}
- void WCErrorDetail::setErrInfo(const BSONObj& errInfo) {
- _errInfo = errInfo.getOwned();
- _isErrInfoSet = true;
- }
+void WCErrorDetail::setErrInfo(const BSONObj& errInfo) {
+ _errInfo = errInfo.getOwned();
+ _isErrInfoSet = true;
+}
- void WCErrorDetail::unsetErrInfo() {
- _isErrInfoSet = false;
- }
+void WCErrorDetail::unsetErrInfo() {
+ _isErrInfoSet = false;
+}
- bool WCErrorDetail::isErrInfoSet() const {
- return _isErrInfoSet;
- }
+bool WCErrorDetail::isErrInfoSet() const {
+ return _isErrInfoSet;
+}
- const BSONObj& WCErrorDetail::getErrInfo() const {
- dassert(_isErrInfoSet);
- return _errInfo;
- }
+const BSONObj& WCErrorDetail::getErrInfo() const {
+ dassert(_isErrInfoSet);
+ return _errInfo;
+}
- void WCErrorDetail::setErrMessage(StringData errMessage) {
- _errMessage = errMessage.toString();
- _isErrMessageSet = true;
- }
-
- void WCErrorDetail::unsetErrMessage() {
- _isErrMessageSet = false;
- }
+void WCErrorDetail::setErrMessage(StringData errMessage) {
+ _errMessage = errMessage.toString();
+ _isErrMessageSet = true;
+}
- bool WCErrorDetail::isErrMessageSet() const {
- return _isErrMessageSet;
- }
+void WCErrorDetail::unsetErrMessage() {
+ _isErrMessageSet = false;
+}
- const std::string& WCErrorDetail::getErrMessage() const {
- dassert(_isErrMessageSet);
- return _errMessage;
- }
+bool WCErrorDetail::isErrMessageSet() const {
+ return _isErrMessageSet;
+}
-} // namespace mongo
+const std::string& WCErrorDetail::getErrMessage() const {
+ dassert(_isErrMessageSet);
+ return _errMessage;
+}
+
+} // namespace mongo
diff --git a/src/mongo/s/write_ops/wc_error_detail.h b/src/mongo/s/write_ops/wc_error_detail.h
index 2f07008b02b..6bf1a757997 100644
--- a/src/mongo/s/write_ops/wc_error_detail.h
+++ b/src/mongo/s/write_ops/wc_error_detail.h
@@ -37,75 +37,75 @@
namespace mongo {
- /**
- * This class represents the layout and content of the error that occurs while trying
- * to satisfy the write concern after executing the insert/update/delete runCommand.
- */
- class WCErrorDetail : public BSONSerializable {
- MONGO_DISALLOW_COPYING(WCErrorDetail);
- public:
-
- //
- // schema declarations
- //
-
- static const BSONField<int> errCode;
- static const BSONField<BSONObj> errInfo;
- static const BSONField<std::string> errMessage;
-
- //
- // construction / destruction
- //
-
- WCErrorDetail();
- virtual ~WCErrorDetail();
-
- /** Copies all the fields present in 'this' to 'other'. */
- void cloneTo(WCErrorDetail* other) const;
-
- //
- // bson serializable interface implementation
- //
-
- virtual bool isValid(std::string* errMsg) const;
- virtual BSONObj toBSON() const;
- virtual bool parseBSON(const BSONObj& source, std::string* errMsg);
- virtual void clear();
- virtual std::string toString() const;
-
- //
- // individual field accessors
- //
-
- void setErrCode(int errCode);
- void unsetErrCode();
- bool isErrCodeSet() const;
- int getErrCode() const;
-
- void setErrInfo(const BSONObj& errInfo);
- void unsetErrInfo();
- bool isErrInfoSet() const;
- const BSONObj& getErrInfo() const;
-
- void setErrMessage(StringData errMessage);
- void unsetErrMessage();
- bool isErrMessageSet() const;
- const std::string& getErrMessage() const;
-
- private:
- // Convention: (M)andatory, (O)ptional
-
- // (M) error code for the write concern error.
- int _errCode;
- bool _isErrCodeSet;
-
- // (O) further details about the write concern error.
- BSONObj _errInfo;
- bool _isErrInfoSet;
-
- // (O) user readable explanation about the write concern error.
- std::string _errMessage;
- bool _isErrMessageSet;
- };
-
-} // namespace mongo
+/**
+ * This class represents the layout and content of the error that occurs while trying
+ * to satisfy the write concern after executing the insert/update/delete runCommand.
+ */
+class WCErrorDetail : public BSONSerializable {
+ MONGO_DISALLOW_COPYING(WCErrorDetail);
+
+public:
+ //
+ // schema declarations
+ //
+
+ static const BSONField<int> errCode;
+ static const BSONField<BSONObj> errInfo;
+ static const BSONField<std::string> errMessage;
+
+ //
+ // construction / destruction
+ //
+
+ WCErrorDetail();
+ virtual ~WCErrorDetail();
+
+ /** Copies all the fields present in 'this' to 'other'. */
+ void cloneTo(WCErrorDetail* other) const;
+
+ //
+ // bson serializable interface implementation
+ //
+
+ virtual bool isValid(std::string* errMsg) const;
+ virtual BSONObj toBSON() const;
+ virtual bool parseBSON(const BSONObj& source, std::string* errMsg);
+ virtual void clear();
+ virtual std::string toString() const;
+
+ //
+ // individual field accessors
+ //
+
+ void setErrCode(int errCode);
+ void unsetErrCode();
+ bool isErrCodeSet() const;
+ int getErrCode() const;
+
+ void setErrInfo(const BSONObj& errInfo);
+ void unsetErrInfo();
+ bool isErrInfoSet() const;
+ const BSONObj& getErrInfo() const;
+
+ void setErrMessage(StringData errMessage);
+ void unsetErrMessage();
+ bool isErrMessageSet() const;
+ const std::string& getErrMessage() const;
+
+private:
+ // Convention: (M)andatory, (O)ptional
+
+ // (M) error code for the write concern error.
+ int _errCode;
+ bool _isErrCodeSet;
+
+ // (O) further details about the write concern error.
+ BSONObj _errInfo;
+ bool _isErrInfoSet;
+
+ // (O) user readable explanation about the write concern error.
+ std::string _errMessage;
+ bool _isErrMessageSet;
+};
+
+} // namespace mongo
diff --git a/src/mongo/s/write_ops/write_error_detail.cpp b/src/mongo/s/write_ops/write_error_detail.cpp
index 3d5b9725fdf..b374ff47638 100644
--- a/src/mongo/s/write_ops/write_error_detail.cpp
+++ b/src/mongo/s/write_ops/write_error_detail.cpp
@@ -33,186 +33,193 @@
namespace mongo {
- using std::string;
+using std::string;
- using mongoutils::str::stream;
- const BSONField<int> WriteErrorDetail::index("index");
- const BSONField<int> WriteErrorDetail::errCode("code");
- const BSONField<BSONObj> WriteErrorDetail::errInfo("errInfo");
- const BSONField<std::string> WriteErrorDetail::errMessage("errmsg");
+using mongoutils::str::stream;
+const BSONField<int> WriteErrorDetail::index("index");
+const BSONField<int> WriteErrorDetail::errCode("code");
+const BSONField<BSONObj> WriteErrorDetail::errInfo("errInfo");
+const BSONField<std::string> WriteErrorDetail::errMessage("errmsg");
- WriteErrorDetail::WriteErrorDetail() {
- clear();
- }
-
- WriteErrorDetail::~WriteErrorDetail() {
- }
-
- bool WriteErrorDetail::isValid(std::string* errMsg) const {
- std::string dummy;
- if (errMsg == NULL) {
- errMsg = &dummy;
- }
+WriteErrorDetail::WriteErrorDetail() {
+ clear();
+}
- // All the mandatory fields must be present.
- if (!_isIndexSet) {
- *errMsg = stream() << "missing " << index.name() << " field";
- return false;
- }
+WriteErrorDetail::~WriteErrorDetail() {}
- if (!_isErrCodeSet) {
- *errMsg = stream() << "missing " << errCode.name() << " field";
- return false;
- }
-
- return true;
+bool WriteErrorDetail::isValid(std::string* errMsg) const {
+ std::string dummy;
+ if (errMsg == NULL) {
+ errMsg = &dummy;
}
- BSONObj WriteErrorDetail::toBSON() const {
- BSONObjBuilder builder;
-
- if (_isIndexSet) builder.append(index(), _index);
-
- if (_isErrCodeSet) builder.append(errCode(), _errCode);
-
- if (_isErrInfoSet) builder.append(errInfo(), _errInfo);
-
- if (_isErrMessageSet) builder.append(errMessage(), _errMessage);
+ // All the mandatory fields must be present.
+ if (!_isIndexSet) {
+ *errMsg = stream() << "missing " << index.name() << " field";
+ return false;
+ }
- return builder.obj();
+ if (!_isErrCodeSet) {
+ *errMsg = stream() << "missing " << errCode.name() << " field";
+ return false;
}
- bool WriteErrorDetail::parseBSON(const BSONObj& source, string* errMsg) {
- clear();
+ return true;
+}
- std::string dummy;
- if (!errMsg) errMsg = &dummy;
+BSONObj WriteErrorDetail::toBSON() const {
+ BSONObjBuilder builder;
- FieldParser::FieldState fieldState;
- fieldState = FieldParser::extract(source, index, &_index, errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) return false;
- _isIndexSet = fieldState == FieldParser::FIELD_SET;
+ if (_isIndexSet)
+ builder.append(index(), _index);
- fieldState = FieldParser::extract(source, errCode, &_errCode, errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) return false;
- _isErrCodeSet = fieldState == FieldParser::FIELD_SET;
+ if (_isErrCodeSet)
+ builder.append(errCode(), _errCode);
- fieldState = FieldParser::extract(source, errInfo, &_errInfo, errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) return false;
- _isErrInfoSet = fieldState == FieldParser::FIELD_SET;
+ if (_isErrInfoSet)
+ builder.append(errInfo(), _errInfo);
- fieldState = FieldParser::extract(source, errMessage, &_errMessage, errMsg);
- if (fieldState == FieldParser::FIELD_INVALID) return false;
- _isErrMessageSet = fieldState == FieldParser::FIELD_SET;
+ if (_isErrMessageSet)
+ builder.append(errMessage(), _errMessage);
- return true;
- }
+ return builder.obj();
+}
- void WriteErrorDetail::clear() {
- _index = 0;
- _isIndexSet = false;
+bool WriteErrorDetail::parseBSON(const BSONObj& source, string* errMsg) {
+ clear();
- _errCode = 0;
- _isErrCodeSet = false;
+ std::string dummy;
+ if (!errMsg)
+ errMsg = &dummy;
- _errInfo = BSONObj();
- _isErrInfoSet = false;
+ FieldParser::FieldState fieldState;
+ fieldState = FieldParser::extract(source, index, &_index, errMsg);
+ if (fieldState == FieldParser::FIELD_INVALID)
+ return false;
+ _isIndexSet = fieldState == FieldParser::FIELD_SET;
- _errMessage.clear();
- _isErrMessageSet = false;
+ fieldState = FieldParser::extract(source, errCode, &_errCode, errMsg);
+ if (fieldState == FieldParser::FIELD_INVALID)
+ return false;
+ _isErrCodeSet = fieldState == FieldParser::FIELD_SET;
- }
+ fieldState = FieldParser::extract(source, errInfo, &_errInfo, errMsg);
+ if (fieldState == FieldParser::FIELD_INVALID)
+ return false;
+ _isErrInfoSet = fieldState == FieldParser::FIELD_SET;
- void WriteErrorDetail::cloneTo(WriteErrorDetail* other) const {
- other->clear();
+ fieldState = FieldParser::extract(source, errMessage, &_errMessage, errMsg);
+ if (fieldState == FieldParser::FIELD_INVALID)
+ return false;
+ _isErrMessageSet = fieldState == FieldParser::FIELD_SET;
- other->_index = _index;
- other->_isIndexSet = _isIndexSet;
+ return true;
+}
- other->_errCode = _errCode;
- other->_isErrCodeSet = _isErrCodeSet;
+void WriteErrorDetail::clear() {
+ _index = 0;
+ _isIndexSet = false;
- other->_errInfo = _errInfo;
- other->_isErrInfoSet = _isErrInfoSet;
+ _errCode = 0;
+ _isErrCodeSet = false;
- other->_errMessage = _errMessage;
- other->_isErrMessageSet = _isErrMessageSet;
- }
+ _errInfo = BSONObj();
+ _isErrInfoSet = false;
- std::string WriteErrorDetail::toString() const {
- return "implement me";
- }
+ _errMessage.clear();
+ _isErrMessageSet = false;
+}
- void WriteErrorDetail::setIndex(int index) {
- _index = index;
- _isIndexSet = true;
- }
+void WriteErrorDetail::cloneTo(WriteErrorDetail* other) const {
+ other->clear();
- void WriteErrorDetail::unsetIndex() {
- _isIndexSet = false;
- }
+ other->_index = _index;
+ other->_isIndexSet = _isIndexSet;
- bool WriteErrorDetail::isIndexSet() const {
- return _isIndexSet;
- }
+ other->_errCode = _errCode;
+ other->_isErrCodeSet = _isErrCodeSet;
- int WriteErrorDetail::getIndex() const {
- dassert(_isIndexSet);
- return _index;
- }
+ other->_errInfo = _errInfo;
+ other->_isErrInfoSet = _isErrInfoSet;
- void WriteErrorDetail::setErrCode(int errCode) {
- _errCode = errCode;
- _isErrCodeSet = true;
- }
+ other->_errMessage = _errMessage;
+ other->_isErrMessageSet = _isErrMessageSet;
+}
- void WriteErrorDetail::unsetErrCode() {
- _isErrCodeSet = false;
- }
+std::string WriteErrorDetail::toString() const {
+ return "implement me";
+}
- bool WriteErrorDetail::isErrCodeSet() const {
- return _isErrCodeSet;
- }
+void WriteErrorDetail::setIndex(int index) {
+ _index = index;
+ _isIndexSet = true;
+}
- int WriteErrorDetail::getErrCode() const {
- dassert(_isErrCodeSet);
- return _errCode;
- }
+void WriteErrorDetail::unsetIndex() {
+ _isIndexSet = false;
+}
- void WriteErrorDetail::setErrInfo(const BSONObj& errInfo) {
- _errInfo = errInfo.getOwned();
- _isErrInfoSet = true;
- }
+bool WriteErrorDetail::isIndexSet() const {
+ return _isIndexSet;
+}
- void WriteErrorDetail::unsetErrInfo() {
- _isErrInfoSet = false;
- }
+int WriteErrorDetail::getIndex() const {
+ dassert(_isIndexSet);
+ return _index;
+}
- bool WriteErrorDetail::isErrInfoSet() const {
- return _isErrInfoSet;
- }
+void WriteErrorDetail::setErrCode(int errCode) {
+ _errCode = errCode;
+ _isErrCodeSet = true;
+}
- const BSONObj& WriteErrorDetail::getErrInfo() const {
- dassert(_isErrInfoSet);
- return _errInfo;
- }
+void WriteErrorDetail::unsetErrCode() {
+ _isErrCodeSet = false;
+}
- void WriteErrorDetail::setErrMessage(StringData errMessage) {
- _errMessage = errMessage.toString();
- _isErrMessageSet = true;
- }
+bool WriteErrorDetail::isErrCodeSet() const {
+ return _isErrCodeSet;
+}
- void WriteErrorDetail::unsetErrMessage() {
- _isErrMessageSet = false;
- }
+int WriteErrorDetail::getErrCode() const {
+ dassert(_isErrCodeSet);
+ return _errCode;
+}
+
+void WriteErrorDetail::setErrInfo(const BSONObj& errInfo) {
+ _errInfo = errInfo.getOwned();
+ _isErrInfoSet = true;
+}
+
+void WriteErrorDetail::unsetErrInfo() {
+ _isErrInfoSet = false;
+}
+
+bool WriteErrorDetail::isErrInfoSet() const {
+ return _isErrInfoSet;
+}
+
+const BSONObj& WriteErrorDetail::getErrInfo() const {
+ dassert(_isErrInfoSet);
+ return _errInfo;
+}
+
+void WriteErrorDetail::setErrMessage(StringData errMessage) {
+ _errMessage = errMessage.toString();
+ _isErrMessageSet = true;
+}
- bool WriteErrorDetail::isErrMessageSet() const {
- return _isErrMessageSet;
- }
+void WriteErrorDetail::unsetErrMessage() {
+ _isErrMessageSet = false;
+}
- const std::string& WriteErrorDetail::getErrMessage() const {
- dassert(_isErrMessageSet);
- return _errMessage;
- }
+bool WriteErrorDetail::isErrMessageSet() const {
+ return _isErrMessageSet;
+}
+
+const std::string& WriteErrorDetail::getErrMessage() const {
+ dassert(_isErrMessageSet);
+ return _errMessage;
+}
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/s/write_ops/write_error_detail.h b/src/mongo/s/write_ops/write_error_detail.h
index 49f9b2d8384..9ac64a85b5d 100644
--- a/src/mongo/s/write_ops/write_error_detail.h
+++ b/src/mongo/s/write_ops/write_error_detail.h
@@ -37,85 +37,85 @@
namespace mongo {
- /**
- * This class represents the layout and content of a insert/update/delete runCommand,
- * the response side.
- */
- class WriteErrorDetail : public BSONSerializable {
- MONGO_DISALLOW_COPYING(WriteErrorDetail);
- public:
-
- //
- // schema declarations
- //
-
- static const BSONField<int> index;
- static const BSONField<int> errCode;
- static const BSONField<BSONObj> errInfo;
- static const BSONField<std::string> errMessage;
-
- //
- // construction / destruction
- //
-
- WriteErrorDetail();
- virtual ~WriteErrorDetail();
-
- /** Copies all the fields present in 'this' to 'other'. */
- void cloneTo(WriteErrorDetail* other) const;
-
- //
- // bson serializable interface implementation
- //
-
- virtual bool isValid(std::string* errMsg) const;
- virtual BSONObj toBSON() const;
- virtual bool parseBSON(const BSONObj& source, std::string* errMsg);
- virtual void clear();
- virtual std::string toString() const;
-
- //
- // individual field accessors
- //
-
- void setIndex(int index);
- void unsetIndex();
- bool isIndexSet() const;
- int getIndex() const;
-
- void setErrCode(int errCode);
- void unsetErrCode();
- bool isErrCodeSet() const;
- int getErrCode() const;
-
- void setErrInfo(const BSONObj& errInfo);
- void unsetErrInfo();
- bool isErrInfoSet() const;
- const BSONObj& getErrInfo() const;
-
- void setErrMessage(StringData errMessage);
- void unsetErrMessage();
- bool isErrMessageSet() const;
- const std::string& getErrMessage() const;
-
- private:
- // Convention: (M)andatory, (O)ptional
-
- // (M) number of the batch item the error refers to
- int _index;
- bool _isIndexSet;
-
- // (M) whether all items in the batch applied correctly
- int _errCode;
- bool _isErrCodeSet;
-
- // (O) further details about the batch item error
- BSONObj _errInfo;
- bool _isErrInfoSet;
-
- // (O) user readable explanation about the batch item error
- std::string _errMessage;
- bool _isErrMessageSet;
- };
-
-} // namespace mongo
+/**
+ * This class represents the layout and content of a insert/update/delete runCommand,
+ * the response side.
+ */
+class WriteErrorDetail : public BSONSerializable {
+ MONGO_DISALLOW_COPYING(WriteErrorDetail);
+
+public:
+ //
+ // schema declarations
+ //
+
+ static const BSONField<int> index;
+ static const BSONField<int> errCode;
+ static const BSONField<BSONObj> errInfo;
+ static const BSONField<std::string> errMessage;
+
+ //
+ // construction / destruction
+ //
+
+ WriteErrorDetail();
+ virtual ~WriteErrorDetail();
+
+ /** Copies all the fields present in 'this' to 'other'. */
+ void cloneTo(WriteErrorDetail* other) const;
+
+ //
+ // bson serializable interface implementation
+ //
+
+ virtual bool isValid(std::string* errMsg) const;
+ virtual BSONObj toBSON() const;
+ virtual bool parseBSON(const BSONObj& source, std::string* errMsg);
+ virtual void clear();
+ virtual std::string toString() const;
+
+ //
+ // individual field accessors
+ //
+
+ void setIndex(int index);
+ void unsetIndex();
+ bool isIndexSet() const;
+ int getIndex() const;
+
+ void setErrCode(int errCode);
+ void unsetErrCode();
+ bool isErrCodeSet() const;
+ int getErrCode() const;
+
+ void setErrInfo(const BSONObj& errInfo);
+ void unsetErrInfo();
+ bool isErrInfoSet() const;
+ const BSONObj& getErrInfo() const;
+
+ void setErrMessage(StringData errMessage);
+ void unsetErrMessage();
+ bool isErrMessageSet() const;
+ const std::string& getErrMessage() const;
+
+private:
+ // Convention: (M)andatory, (O)ptional
+
+ // (M) number of the batch item the error refers to
+ int _index;
+ bool _isIndexSet;
+
+ // (M) whether all items in the batch applied correctly
+ int _errCode;
+ bool _isErrCodeSet;
+
+ // (O) further details about the batch item error
+ BSONObj _errInfo;
+ bool _isErrInfoSet;
+
+ // (O) user readable explanation about the batch item error
+ std::string _errMessage;
+ bool _isErrMessageSet;
+};
+
+} // namespace mongo
diff --git a/src/mongo/s/write_ops/write_op.cpp b/src/mongo/s/write_ops/write_op.cpp
index 9532922237f..f630802121d 100644
--- a/src/mongo/s/write_ops/write_op.cpp
+++ b/src/mongo/s/write_ops/write_op.cpp
@@ -34,258 +34,239 @@
namespace mongo {
- using std::stringstream;
- using std::vector;
+using std::stringstream;
+using std::vector;
- static void clear( vector<ChildWriteOp*>* childOps ) {
- for ( vector<ChildWriteOp*>::const_iterator it = childOps->begin(); it != childOps->end();
- ++it ) {
- delete *it;
- }
- childOps->clear();
- }
-
- WriteOp::~WriteOp() {
- clear( &_childOps );
- clear( &_history );
- }
-
- const BatchItemRef& WriteOp::getWriteItem() const {
- return _itemRef;
- }
-
- WriteOpState WriteOp::getWriteState() const {
- return _state;
- }
-
- const WriteErrorDetail& WriteOp::getOpError() const {
- dassert( _state == WriteOpState_Error );
- return *_error;
+static void clear(vector<ChildWriteOp*>* childOps) {
+ for (vector<ChildWriteOp*>::const_iterator it = childOps->begin(); it != childOps->end();
+ ++it) {
+ delete *it;
}
+ childOps->clear();
+}
- Status WriteOp::targetWrites( const NSTargeter& targeter,
- std::vector<TargetedWrite*>* targetedWrites ) {
-
- bool isUpdate = _itemRef.getOpType() == BatchedCommandRequest::BatchType_Update;
- bool isDelete = _itemRef.getOpType() == BatchedCommandRequest::BatchType_Delete;
- bool isIndexInsert = _itemRef.getRequest()->isInsertIndexRequest();
-
- Status targetStatus = Status::OK();
- OwnedPointerVector<ShardEndpoint> endpointsOwned;
- vector<ShardEndpoint*>& endpoints = endpointsOwned.mutableVector();
+WriteOp::~WriteOp() {
+ clear(&_childOps);
+ clear(&_history);
+}
- if ( isUpdate ) {
- targetStatus = targeter.targetUpdate( *_itemRef.getUpdate(), &endpoints );
- }
- else if ( isDelete ) {
- targetStatus = targeter.targetDelete( *_itemRef.getDelete(), &endpoints );
- }
- else {
- dassert( _itemRef.getOpType() == BatchedCommandRequest::BatchType_Insert );
+const BatchItemRef& WriteOp::getWriteItem() const {
+ return _itemRef;
+}
- ShardEndpoint* endpoint = NULL;
- // TODO: Remove the index targeting stuff once there is a command for it
- if ( !isIndexInsert ) {
- targetStatus = targeter.targetInsert( _itemRef.getDocument(), &endpoint );
- }
- else {
- // TODO: Retry index writes with stale version?
- targetStatus = targeter.targetCollection( &endpoints );
- }
+WriteOpState WriteOp::getWriteState() const {
+ return _state;
+}
- if ( !targetStatus.isOK() ) {
- dassert( NULL == endpoint );
- return targetStatus;
- }
+const WriteErrorDetail& WriteOp::getOpError() const {
+ dassert(_state == WriteOpState_Error);
+ return *_error;
+}
- // Store single endpoint result if we targeted a single endpoint
- if ( endpoint ) endpoints.push_back( endpoint );
+Status WriteOp::targetWrites(const NSTargeter& targeter,
+ std::vector<TargetedWrite*>* targetedWrites) {
+ bool isUpdate = _itemRef.getOpType() == BatchedCommandRequest::BatchType_Update;
+ bool isDelete = _itemRef.getOpType() == BatchedCommandRequest::BatchType_Delete;
+ bool isIndexInsert = _itemRef.getRequest()->isInsertIndexRequest();
+
+ Status targetStatus = Status::OK();
+ OwnedPointerVector<ShardEndpoint> endpointsOwned;
+ vector<ShardEndpoint*>& endpoints = endpointsOwned.mutableVector();
+
+ if (isUpdate) {
+ targetStatus = targeter.targetUpdate(*_itemRef.getUpdate(), &endpoints);
+ } else if (isDelete) {
+ targetStatus = targeter.targetDelete(*_itemRef.getDelete(), &endpoints);
+ } else {
+ dassert(_itemRef.getOpType() == BatchedCommandRequest::BatchType_Insert);
+
+ ShardEndpoint* endpoint = NULL;
+ // TODO: Remove the index targeting stuff once there is a command for it
+ if (!isIndexInsert) {
+ targetStatus = targeter.targetInsert(_itemRef.getDocument(), &endpoint);
+ } else {
+ // TODO: Retry index writes with stale version?
+ targetStatus = targeter.targetCollection(&endpoints);
}
- // If we're targeting more than one endpoint with an update/delete, we have to target
- // everywhere since we cannot currently retry partial results.
- // NOTE: Index inserts are currently specially targeted only at the current collection to
- // avoid creating collections everywhere.
- if ( targetStatus.isOK() && endpoints.size() > 1u && !isIndexInsert ) {
- endpointsOwned.clear();
- invariant( endpoints.empty() );
- targetStatus = targeter.targetAllShards( &endpoints );
+ if (!targetStatus.isOK()) {
+ dassert(NULL == endpoint);
+ return targetStatus;
}
- // If we had an error, stop here
- if ( !targetStatus.isOK() ) return targetStatus;
+ // Store single endpoint result if we targeted a single endpoint
+ if (endpoint)
+ endpoints.push_back(endpoint);
+ }
- for ( vector<ShardEndpoint*>::iterator it = endpoints.begin(); it != endpoints.end();
- ++it ) {
+ // If we're targeting more than one endpoint with an update/delete, we have to target
+ // everywhere since we cannot currently retry partial results.
+ // NOTE: Index inserts are currently specially targeted only at the current collection to
+ // avoid creating collections everywhere.
+ if (targetStatus.isOK() && endpoints.size() > 1u && !isIndexInsert) {
+ endpointsOwned.clear();
+ invariant(endpoints.empty());
+ targetStatus = targeter.targetAllShards(&endpoints);
+ }
- ShardEndpoint* endpoint = *it;
+ // If we had an error, stop here
+ if (!targetStatus.isOK())
+ return targetStatus;
- _childOps.push_back( new ChildWriteOp( this ) );
+ for (vector<ShardEndpoint*>::iterator it = endpoints.begin(); it != endpoints.end(); ++it) {
+ ShardEndpoint* endpoint = *it;
- WriteOpRef ref( _itemRef.getItemIndex(), _childOps.size() - 1 );
+ _childOps.push_back(new ChildWriteOp(this));
- // For now, multiple endpoints imply no versioning - we can't retry half a multi-write
- if ( endpoints.size() == 1u ) {
- targetedWrites->push_back( new TargetedWrite( *endpoint, ref ) );
- }
- else {
- ShardEndpoint broadcastEndpoint( endpoint->shardName,
- ChunkVersion::IGNORED() );
- targetedWrites->push_back( new TargetedWrite( broadcastEndpoint, ref ) );
- }
+ WriteOpRef ref(_itemRef.getItemIndex(), _childOps.size() - 1);
- _childOps.back()->pendingWrite = targetedWrites->back();
- _childOps.back()->state = WriteOpState_Pending;
+ // For now, multiple endpoints imply no versioning - we can't retry half a multi-write
+ if (endpoints.size() == 1u) {
+ targetedWrites->push_back(new TargetedWrite(*endpoint, ref));
+ } else {
+ ShardEndpoint broadcastEndpoint(endpoint->shardName, ChunkVersion::IGNORED());
+ targetedWrites->push_back(new TargetedWrite(broadcastEndpoint, ref));
}
- _state = WriteOpState_Pending;
- return Status::OK();
- }
-
- size_t WriteOp::getNumTargeted() {
- return _childOps.size();
+ _childOps.back()->pendingWrite = targetedWrites->back();
+ _childOps.back()->state = WriteOpState_Pending;
}
- static bool isRetryErrCode( int errCode ) {
- return errCode == ErrorCodes::StaleShardVersion;
- }
+ _state = WriteOpState_Pending;
+ return Status::OK();
+}
- // Aggregate a bunch of errors for a single op together
- static void combineOpErrors( const vector<ChildWriteOp*>& errOps, WriteErrorDetail* error ) {
+size_t WriteOp::getNumTargeted() {
+ return _childOps.size();
+}
- // Special case single response
- if ( errOps.size() == 1 ) {
- errOps.front()->error->cloneTo( error );
- return;
- }
+static bool isRetryErrCode(int errCode) {
+ return errCode == ErrorCodes::StaleShardVersion;
+}
- error->setErrCode( ErrorCodes::MultipleErrorsOccurred );
+// Aggregate a bunch of errors for a single op together
+static void combineOpErrors(const vector<ChildWriteOp*>& errOps, WriteErrorDetail* error) {
+ // Special case single response
+ if (errOps.size() == 1) {
+ errOps.front()->error->cloneTo(error);
+ return;
+ }
- // Generate the multi-error message below
- stringstream msg;
- msg << "multiple errors for op : ";
+ error->setErrCode(ErrorCodes::MultipleErrorsOccurred);
- BSONArrayBuilder errB;
- for ( vector<ChildWriteOp*>::const_iterator it = errOps.begin(); it != errOps.end();
- ++it ) {
- const ChildWriteOp* errOp = *it;
- if ( it != errOps.begin() ) msg << " :: and :: ";
- msg << errOp->error->getErrMessage();
- errB.append( errOp->error->toBSON() );
- }
+ // Generate the multi-error message below
+ stringstream msg;
+ msg << "multiple errors for op : ";
- error->setErrInfo( BSON( "causedBy" << errB.arr() ) );
- error->setIndex( errOps.front()->error->getIndex() );
- error->setErrMessage( msg.str() );
+ BSONArrayBuilder errB;
+ for (vector<ChildWriteOp*>::const_iterator it = errOps.begin(); it != errOps.end(); ++it) {
+ const ChildWriteOp* errOp = *it;
+ if (it != errOps.begin())
+ msg << " :: and :: ";
+ msg << errOp->error->getErrMessage();
+ errB.append(errOp->error->toBSON());
}
- /**
- * This is the core function which aggregates all the results of a write operation on multiple
- * shards and updates the write operation's state.
- */
- void WriteOp::updateOpState() {
-
- vector<ChildWriteOp*> childErrors;
-
- bool isRetryError = true;
- for ( vector<ChildWriteOp*>::iterator it = _childOps.begin(); it != _childOps.end();
- it++ ) {
+ error->setErrInfo(BSON("causedBy" << errB.arr()));
+ error->setIndex(errOps.front()->error->getIndex());
+ error->setErrMessage(msg.str());
+}
- ChildWriteOp* childOp = *it;
+/**
+ * This is the core function which aggregates all the results of a write operation on multiple
+ * shards and updates the write operation's state.
+ */
+void WriteOp::updateOpState() {
+ vector<ChildWriteOp*> childErrors;
- // Don't do anything till we have all the info
- if ( childOp->state != WriteOpState_Completed
- && childOp->state != WriteOpState_Error ) {
- return;
- }
+ bool isRetryError = true;
+ for (vector<ChildWriteOp*>::iterator it = _childOps.begin(); it != _childOps.end(); it++) {
+ ChildWriteOp* childOp = *it;
- if ( childOp->state == WriteOpState_Error ) {
- childErrors.push_back( childOp );
- // Any non-retry error aborts all
- if ( !isRetryErrCode( childOp->error->getErrCode() ) ) isRetryError = false;
- }
+ // Don't do anything till we have all the info
+ if (childOp->state != WriteOpState_Completed && childOp->state != WriteOpState_Error) {
+ return;
}
- if ( !childErrors.empty() && isRetryError ) {
- // Since we're using broadcast mode for multi-shard writes, which cannot SCE
- dassert( childErrors.size() == 1u );
- _state = WriteOpState_Ready;
- }
- else if ( !childErrors.empty() ) {
- _error.reset( new WriteErrorDetail );
- combineOpErrors( childErrors, _error.get() );
- _state = WriteOpState_Error;
+ if (childOp->state == WriteOpState_Error) {
+ childErrors.push_back(childOp);
+ // Any non-retry error aborts all
+ if (!isRetryErrCode(childOp->error->getErrCode()))
+ isRetryError = false;
}
- else {
- _state = WriteOpState_Completed;
- }
-
- // Now that we're done with the child ops, do something with them
- // TODO: Don't store unlimited history?
- dassert( _state != WriteOpState_Pending );
- _history.insert( _history.end(), _childOps.begin(), _childOps.end() );
- _childOps.clear();
}
- void WriteOp::cancelWrites( const WriteErrorDetail* why ) {
+ if (!childErrors.empty() && isRetryError) {
+ // Since we're using broadcast mode for multi-shard writes, which cannot SCE
+ dassert(childErrors.size() == 1u);
+ _state = WriteOpState_Ready;
+ } else if (!childErrors.empty()) {
+ _error.reset(new WriteErrorDetail);
+ combineOpErrors(childErrors, _error.get());
+ _state = WriteOpState_Error;
+ } else {
+ _state = WriteOpState_Completed;
+ }
- dassert( _state == WriteOpState_Pending || _state == WriteOpState_Ready );
- for ( vector<ChildWriteOp*>::iterator it = _childOps.begin(); it != _childOps.end();
- ++it ) {
+ // Now that we're done with the child ops, do something with them
+ // TODO: Don't store unlimited history?
+ dassert(_state != WriteOpState_Pending);
+ _history.insert(_history.end(), _childOps.begin(), _childOps.end());
+ _childOps.clear();
+}
- ChildWriteOp* childOp = *it;
+void WriteOp::cancelWrites(const WriteErrorDetail* why) {
+ dassert(_state == WriteOpState_Pending || _state == WriteOpState_Ready);
+ for (vector<ChildWriteOp*>::iterator it = _childOps.begin(); it != _childOps.end(); ++it) {
+ ChildWriteOp* childOp = *it;
- if ( childOp->state == WriteOpState_Pending ) {
- childOp->endpoint.reset( new ShardEndpoint( childOp->pendingWrite->endpoint ) );
- if ( why ) {
- childOp->error.reset( new WriteErrorDetail );
- why->cloneTo( childOp->error.get() );
- }
- childOp->state = WriteOpState_Cancelled;
+ if (childOp->state == WriteOpState_Pending) {
+ childOp->endpoint.reset(new ShardEndpoint(childOp->pendingWrite->endpoint));
+ if (why) {
+ childOp->error.reset(new WriteErrorDetail);
+ why->cloneTo(childOp->error.get());
}
+ childOp->state = WriteOpState_Cancelled;
}
-
- _history.insert( _history.end(), _childOps.begin(), _childOps.end() );
- _childOps.clear();
-
- _state = WriteOpState_Ready;
}
- void WriteOp::noteWriteComplete( const TargetedWrite& targetedWrite ) {
-
- const WriteOpRef& ref = targetedWrite.writeOpRef;
- dassert( static_cast<size_t>( ref.second ) < _childOps.size() );
- ChildWriteOp& childOp = *_childOps.at( ref.second );
-
- childOp.pendingWrite = NULL;
- childOp.endpoint.reset( new ShardEndpoint( targetedWrite.endpoint ) );
- childOp.state = WriteOpState_Completed;
- updateOpState();
- }
+ _history.insert(_history.end(), _childOps.begin(), _childOps.end());
+ _childOps.clear();
- void WriteOp::noteWriteError( const TargetedWrite& targetedWrite,
- const WriteErrorDetail& error ) {
+ _state = WriteOpState_Ready;
+}
- const WriteOpRef& ref = targetedWrite.writeOpRef;
- ChildWriteOp& childOp = *_childOps.at( ref.second );
+void WriteOp::noteWriteComplete(const TargetedWrite& targetedWrite) {
+ const WriteOpRef& ref = targetedWrite.writeOpRef;
+ dassert(static_cast<size_t>(ref.second) < _childOps.size());
+ ChildWriteOp& childOp = *_childOps.at(ref.second);
- childOp.pendingWrite = NULL;
- childOp.endpoint.reset( new ShardEndpoint( targetedWrite.endpoint ) );
- childOp.error.reset( new WriteErrorDetail );
- error.cloneTo( childOp.error.get() );
- dassert( ref.first == _itemRef.getItemIndex() );
- childOp.error->setIndex( _itemRef.getItemIndex() );
- childOp.state = WriteOpState_Error;
- updateOpState();
- }
+ childOp.pendingWrite = NULL;
+ childOp.endpoint.reset(new ShardEndpoint(targetedWrite.endpoint));
+ childOp.state = WriteOpState_Completed;
+ updateOpState();
+}
- void WriteOp::setOpError( const WriteErrorDetail& error ) {
- dassert( _state == WriteOpState_Ready );
- _error.reset( new WriteErrorDetail );
- error.cloneTo( _error.get() );
- _error->setIndex( _itemRef.getItemIndex() );
- _state = WriteOpState_Error;
- // No need to updateOpState, set directly
- }
+void WriteOp::noteWriteError(const TargetedWrite& targetedWrite, const WriteErrorDetail& error) {
+ const WriteOpRef& ref = targetedWrite.writeOpRef;
+ ChildWriteOp& childOp = *_childOps.at(ref.second);
+
+ childOp.pendingWrite = NULL;
+ childOp.endpoint.reset(new ShardEndpoint(targetedWrite.endpoint));
+ childOp.error.reset(new WriteErrorDetail);
+ error.cloneTo(childOp.error.get());
+ dassert(ref.first == _itemRef.getItemIndex());
+ childOp.error->setIndex(_itemRef.getItemIndex());
+ childOp.state = WriteOpState_Error;
+ updateOpState();
+}
+void WriteOp::setOpError(const WriteErrorDetail& error) {
+ dassert(_state == WriteOpState_Ready);
+ _error.reset(new WriteErrorDetail);
+ error.cloneTo(_error.get());
+ _error->setIndex(_itemRef.getItemIndex());
+ _state = WriteOpState_Error;
+ // No need to updateOpState, set directly
+}
}
diff --git a/src/mongo/s/write_ops/write_op.h b/src/mongo/s/write_ops/write_op.h
index 98ccf0d691d..fb189edfffb 100644
--- a/src/mongo/s/write_ops/write_op.h
+++ b/src/mongo/s/write_ops/write_op.h
@@ -38,206 +38,196 @@
namespace mongo {
- struct TargetedWrite;
- struct ChildWriteOp;
+struct TargetedWrite;
+struct ChildWriteOp;
- enum WriteOpState {
+enum WriteOpState {
- // Item is ready to be targeted
- WriteOpState_Ready,
+ // Item is ready to be targeted
+ WriteOpState_Ready,
- // Item is targeted and we're waiting for outstanding shard requests to populate
- // responses
- WriteOpState_Pending,
+ // Item is targeted and we're waiting for outstanding shard requests to populate
+ // responses
+ WriteOpState_Pending,
- // Op was successful, write completed
- // We assume all states higher than this one are *final*
- WriteOpState_Completed,
+ // Op was successful, write completed
+ // We assume all states higher than this one are *final*
+ WriteOpState_Completed,
- // Op failed with some error
- WriteOpState_Error,
+ // Op failed with some error
+ WriteOpState_Error,
- // Op was cancelled before sending (only child write ops can be cancelled)
- WriteOpState_Cancelled,
+ // Op was cancelled before sending (only child write ops can be cancelled)
+ WriteOpState_Cancelled,
- // Catch-all error state.
- WriteOpState_Unknown
- };
+ // Catch-all error state.
+ WriteOpState_Unknown
+};
+
+/**
+ * State of a single write item in-progress from a client request.
+ *
 + * The lifecycle of a write op:
+ *
+ * 0. Begins at _Ready,
+ *
+ * 1a. Targeted, and a ChildWriteOp created to track the state of each returned TargetedWrite.
+ * The state is changed to _Pending.
+ * 1b. If the op cannot be targeted, the error is set directly (_Error), and the write op is
+ * completed.
+ *
+ * 2a. The current TargetedWrites are cancelled, and the op state is reset to _Ready
+ * 2b. TargetedWrites finish successfully and unsuccessfully.
+ *
+ * On the last error arriving...
+ *
+ * 3a. If the errors allow for retry, the WriteOp is reset to _Ready, previous ChildWriteOps
+ * are placed in the history, and goto 0.
+ * 3b. If the errors don't allow for retry, they are combined into a single error and the
+ * state is changed to _Error.
+ * 3c. If there are no errors, the state is changed to _Completed.
+ *
+ * WriteOps finish in a _Completed or _Error state.
+ */
+class WriteOp {
+public:
+ WriteOp(const BatchItemRef& itemRef) : _itemRef(itemRef), _state(WriteOpState_Ready) {}
+
+ ~WriteOp();
/**
- * State of a single write item in-progress from a client request.
- *
- * The lifecyle of a write op:
+ * Returns the write item for this operation
+ */
+ const BatchItemRef& getWriteItem() const;
+
+ /**
+ * Returns the op's current state.
+ */
+ WriteOpState getWriteState() const;
+
+ /**
+ * Returns the op's error.
*
- * 0. Begins at _Ready,
+ * Can only be used in state _Error
+ */
+ const WriteErrorDetail& getOpError() const;
+
+ /**
+ * Creates TargetedWrite operations for every applicable shard, which contain the
+ * information needed to send the child writes generated from this write item.
*
- * 1a. Targeted, and a ChildWriteOp created to track the state of each returned TargetedWrite.
- * The state is changed to _Pending.
- * 1b. If the op cannot be targeted, the error is set directly (_Error), and the write op is
- * completed.
+ * The ShardTargeter determines the ShardEndpoints to send child writes to, but is not
+ * modified by this operation.
*
- * 2a. The current TargetedWrites are cancelled, and the op state is reset to _Ready
- * 2b. TargetedWrites finish successfully and unsuccessfully.
+ * Returns !OK if the targeting process itself fails
+ * (no TargetedWrites will be added, state unchanged)
+ */
+ Status targetWrites(const NSTargeter& targeter, std::vector<TargetedWrite*>* targetedWrites);
+
+ /**
+ * Returns the number of child writes that were last targeted.
+ */
+ size_t getNumTargeted();
+
+ /**
+ * Resets the state of this write op to _Ready and stops waiting for any outstanding
+ * TargetedWrites. Optional error can be provided for reporting.
*
- * On the last error arriving...
+ * Can only be called when state is _Pending, or is a no-op if called when the state
+ * is still _Ready (and therefore no writes are pending).
+ */
+ void cancelWrites(const WriteErrorDetail* why);
+
+ /**
+ * Marks the targeted write as finished for this write op.
*
- * 3a. If the errors allow for retry, the WriteOp is reset to _Ready, previous ChildWriteOps
- * are placed in the history, and goto 0.
- * 3b. If the errors don't allow for retry, they are combined into a single error and the
- * state is changed to _Error.
- * 3c. If there are no errors, the state is changed to _Completed.
+ * One of noteWriteComplete or noteWriteError should be called exactly once for every
+ * TargetedWrite.
+ */
+ void noteWriteComplete(const TargetedWrite& targetedWrite);
+
+ /**
+ * Stores the error response of a TargetedWrite for later use, marks the write as finished.
*
- * WriteOps finish in a _Completed or _Error state.
+ * As above, one of noteWriteComplete or noteWriteError should be called exactly once for
+ * every TargetedWrite.
*/
- class WriteOp {
- public:
-
- WriteOp( const BatchItemRef& itemRef ) :
- _itemRef( itemRef ), _state( WriteOpState_Ready ) {
- }
-
- ~WriteOp();
-
- /**
- * Returns the write item for this operation
- */
- const BatchItemRef& getWriteItem() const;
-
- /**
- * Returns the op's current state.
- */
- WriteOpState getWriteState() const;
-
- /**
- * Returns the op's error.
- *
- * Can only be used in state _Error
- */
- const WriteErrorDetail& getOpError() const;
-
- /**
- * Creates TargetedWrite operations for every applicable shard, which contain the
- * information needed to send the child writes generated from this write item.
- *
- * The ShardTargeter determines the ShardEndpoints to send child writes to, but is not
- * modified by this operation.
- *
- * Returns !OK if the targeting process itself fails
- * (no TargetedWrites will be added, state unchanged)
- */
- Status targetWrites( const NSTargeter& targeter,
- std::vector<TargetedWrite*>* targetedWrites );
-
- /**
- * Returns the number of child writes that were last targeted.
- */
- size_t getNumTargeted();
-
- /**
- * Resets the state of this write op to _Ready and stops waiting for any outstanding
- * TargetedWrites. Optional error can be provided for reporting.
- *
- * Can only be called when state is _Pending, or is a no-op if called when the state
- * is still _Ready (and therefore no writes are pending).
- */
- void cancelWrites( const WriteErrorDetail* why );
-
- /**
- * Marks the targeted write as finished for this write op.
- *
- * One of noteWriteComplete or noteWriteError should be called exactly once for every
- * TargetedWrite.
- */
- void noteWriteComplete( const TargetedWrite& targetedWrite );
-
- /**
- * Stores the error response of a TargetedWrite for later use, marks the write as finished.
- *
- * As above, one of noteWriteComplete or noteWriteError should be called exactly once for
- * every TargetedWrite.
- */
- void noteWriteError( const TargetedWrite& targetedWrite, const WriteErrorDetail& error );
-
- /**
- * Sets the error for this write op directly, and forces the state to _Error.
- *
- * Should only be used when in state _Ready.
- */
- void setOpError( const WriteErrorDetail& error );
-
- private:
-
- /**
- * Updates the op state after new information is received.
- */
- void updateOpState();
-
- // Owned elsewhere, reference to a batch with a write item
- const BatchItemRef _itemRef;
-
- // What stage of the operation we are at
- WriteOpState _state;
-
- // filled when state == _Pending
- std::vector<ChildWriteOp*> _childOps;
-
- // filled when state == _Error
- std::unique_ptr<WriteErrorDetail> _error;
-
- // Finished child operations, for debugging
- std::vector<ChildWriteOp*> _history;
- };
+ void noteWriteError(const TargetedWrite& targetedWrite, const WriteErrorDetail& error);
/**
- * State of a write in-progress (to a single shard) which is one part of a larger write
- * operation.
+ * Sets the error for this write op directly, and forces the state to _Error.
*
- * As above, the write op may finish in either a successful (_Completed) or unsuccessful
- * (_Error) state.
+ * Should only be used when in state _Ready.
*/
- struct ChildWriteOp {
+ void setOpError(const WriteErrorDetail& error);
- ChildWriteOp( WriteOp* const parent ) :
- parentOp( parent ), state( WriteOpState_Ready ), pendingWrite( NULL ) {
- }
+private:
+ /**
+ * Updates the op state after new information is received.
+ */
+ void updateOpState();
- const WriteOp* const parentOp;
- WriteOpState state;
+ // Owned elsewhere, reference to a batch with a write item
+ const BatchItemRef _itemRef;
- // non-zero when state == _Pending
- // Not owned here but tracked for reporting
- TargetedWrite* pendingWrite;
+ // What stage of the operation we are at
+ WriteOpState _state;
- // filled when state > _Pending
- std::unique_ptr<ShardEndpoint> endpoint;
+ // filled when state == _Pending
+ std::vector<ChildWriteOp*> _childOps;
- // filled when state == _Error or (optionally) when state == _Cancelled
- std::unique_ptr<WriteErrorDetail> error;
- };
+ // filled when state == _Error
+ std::unique_ptr<WriteErrorDetail> _error;
- // First value is write item index in the batch, second value is child write op index
- typedef std::pair<int, int> WriteOpRef;
+ // Finished child operations, for debugging
+ std::vector<ChildWriteOp*> _history;
+};
- /**
- * A write with A) a request targeted at a particular shard endpoint, and B) a response targeted
- * at a particular WriteOp.
- *
- * TargetedWrites are the link between the RPC layer and the in-progress write
- * operation.
- */
- struct TargetedWrite {
+/**
+ * State of a write in-progress (to a single shard) which is one part of a larger write
+ * operation.
+ *
+ * As above, the write op may finish in either a successful (_Completed) or unsuccessful
+ * (_Error) state.
+ */
+struct ChildWriteOp {
+ ChildWriteOp(WriteOp* const parent)
+ : parentOp(parent), state(WriteOpState_Ready), pendingWrite(NULL) {}
- TargetedWrite( const ShardEndpoint& endpoint, WriteOpRef writeOpRef ) :
- endpoint( endpoint ), writeOpRef( writeOpRef ) {
- }
+ const WriteOp* const parentOp;
+ WriteOpState state;
- // Where to send the write
- ShardEndpoint endpoint;
+ // non-zero when state == _Pending
+ // Not owned here but tracked for reporting
+ TargetedWrite* pendingWrite;
- // Where to find the write item and put the response
- // TODO: Could be a more complex handle, shared between write state and networking code if
- // we need to be able to cancel ops.
- WriteOpRef writeOpRef;
- };
+ // filled when state > _Pending
+ std::unique_ptr<ShardEndpoint> endpoint;
+ // filled when state == _Error or (optionally) when state == _Cancelled
+ std::unique_ptr<WriteErrorDetail> error;
+};
+
+// First value is write item index in the batch, second value is child write op index
+typedef std::pair<int, int> WriteOpRef;
+
+/**
+ * A write with A) a request targeted at a particular shard endpoint, and B) a response targeted
+ * at a particular WriteOp.
+ *
+ * TargetedWrites are the link between the RPC layer and the in-progress write
+ * operation.
+ */
+struct TargetedWrite {
+ TargetedWrite(const ShardEndpoint& endpoint, WriteOpRef writeOpRef)
+ : endpoint(endpoint), writeOpRef(writeOpRef) {}
+
+ // Where to send the write
+ ShardEndpoint endpoint;
+
+ // Where to find the write item and put the response
+ // TODO: Could be a more complex handle, shared between write state and networking code if
+ // we need to be able to cancel ops.
+ WriteOpRef writeOpRef;
+};
}
diff --git a/src/mongo/s/write_ops/write_op_test.cpp b/src/mongo/s/write_ops/write_op_test.cpp
index fc5caa05dfb..b836caae80c 100644
--- a/src/mongo/s/write_ops/write_op_test.cpp
+++ b/src/mongo/s/write_ops/write_op_test.cpp
@@ -39,365 +39,322 @@
namespace {
- using std::unique_ptr;
- using std::string;
- using std::vector;
+using std::unique_ptr;
+using std::string;
+using std::vector;
- using namespace mongo;
+using namespace mongo;
- WriteErrorDetail* buildError( int code, const BSONObj& info, const string& message ) {
+WriteErrorDetail* buildError(int code, const BSONObj& info, const string& message) {
+ WriteErrorDetail* error = new WriteErrorDetail();
+ error->setErrCode(code);
+ error->setErrInfo(info);
+ error->setErrMessage(message);
- WriteErrorDetail* error = new WriteErrorDetail();
- error->setErrCode( code );
- error->setErrInfo( info );
- error->setErrMessage( message );
+ return error;
+}
- return error;
- }
+TEST(WriteOpTests, BasicError) {
+ //
+ // Test of basic error-setting on write op
+ //
- TEST(WriteOpTests, BasicError) {
+ BatchedCommandRequest request(BatchedCommandRequest::BatchType_Insert);
+ request.setNS("foo.bar");
+ request.getInsertRequest()->addToDocuments(BSON("x" << 1));
- //
- // Test of basic error-setting on write op
- //
+ WriteOp writeOp(BatchItemRef(&request, 0));
+ ASSERT_EQUALS(writeOp.getWriteState(), WriteOpState_Ready);
- BatchedCommandRequest request( BatchedCommandRequest::BatchType_Insert );
- request.setNS( "foo.bar" );
- request.getInsertRequest()->addToDocuments( BSON( "x" << 1 ) );
+ unique_ptr<WriteErrorDetail> error(
+ buildError(ErrorCodes::UnknownError, BSON("data" << 12345), "some message"));
- WriteOp writeOp( BatchItemRef( &request, 0 ) );
- ASSERT_EQUALS( writeOp.getWriteState(), WriteOpState_Ready );
+ writeOp.setOpError(*error);
+ ASSERT_EQUALS(writeOp.getWriteState(), WriteOpState_Error);
+ ASSERT_EQUALS(writeOp.getOpError().getErrCode(), error->getErrCode());
+ ASSERT_EQUALS(writeOp.getOpError().getErrInfo()["data"].Int(),
+ error->getErrInfo()["data"].Int());
+ ASSERT_EQUALS(writeOp.getOpError().getErrMessage(), error->getErrMessage());
+}
- unique_ptr<WriteErrorDetail> error( buildError( ErrorCodes::UnknownError,
- BSON( "data" << 12345 ),
- "some message" ) );
+TEST(WriteOpTests, TargetSingle) {
+ //
+ // Basic targeting test
+ //
- writeOp.setOpError( *error );
- ASSERT_EQUALS( writeOp.getWriteState(), WriteOpState_Error );
- ASSERT_EQUALS( writeOp.getOpError().getErrCode(), error->getErrCode() );
- ASSERT_EQUALS( writeOp.getOpError().getErrInfo()["data"].Int(),
- error->getErrInfo()["data"].Int() );
- ASSERT_EQUALS( writeOp.getOpError().getErrMessage(), error->getErrMessage() );
- }
+ NamespaceString nss("foo.bar");
- TEST(WriteOpTests, TargetSingle) {
+ ShardEndpoint endpoint("shard", ChunkVersion::IGNORED());
- //
- // Basic targeting test
- //
+ vector<MockRange*> mockRanges;
+ mockRanges.push_back(new MockRange(endpoint, nss, BSON("x" << MINKEY), BSON("x" << MAXKEY)));
- NamespaceString nss( "foo.bar" );
+ BatchedCommandRequest request(BatchedCommandRequest::BatchType_Insert);
+ request.setNS(nss.ns());
+ request.getInsertRequest()->addToDocuments(BSON("x" << 1));
- ShardEndpoint endpoint( "shard", ChunkVersion::IGNORED() );
+ // Do single-target write op
- vector<MockRange*> mockRanges;
- mockRanges.push_back( new MockRange( endpoint,
- nss,
- BSON( "x" << MINKEY ),
- BSON( "x" << MAXKEY ) ) );
+ WriteOp writeOp(BatchItemRef(&request, 0));
+ ASSERT_EQUALS(writeOp.getWriteState(), WriteOpState_Ready);
- BatchedCommandRequest request( BatchedCommandRequest::BatchType_Insert );
- request.setNS( nss.ns() );
- request.getInsertRequest()->addToDocuments( BSON( "x" << 1 ) );
+ MockNSTargeter targeter;
+ targeter.init(mockRanges);
- // Do single-target write op
+ OwnedPointerVector<TargetedWrite> targetedOwned;
+ vector<TargetedWrite*>& targeted = targetedOwned.mutableVector();
+ Status status = writeOp.targetWrites(targeter, &targeted);
- WriteOp writeOp( BatchItemRef( &request, 0 ) );
- ASSERT_EQUALS( writeOp.getWriteState(), WriteOpState_Ready );
+ ASSERT(status.isOK());
+ ASSERT_EQUALS(writeOp.getWriteState(), WriteOpState_Pending);
+ ASSERT_EQUALS(targeted.size(), 1u);
+ assertEndpointsEqual(targeted.front()->endpoint, endpoint);
- MockNSTargeter targeter;
- targeter.init( mockRanges );
+ writeOp.noteWriteComplete(*targeted.front());
+ ASSERT_EQUALS(writeOp.getWriteState(), WriteOpState_Completed);
+}
- OwnedPointerVector<TargetedWrite> targetedOwned;
- vector<TargetedWrite*>& targeted = targetedOwned.mutableVector();
- Status status = writeOp.targetWrites( targeter, &targeted );
+BatchedDeleteDocument* buildDeleteDoc(const BSONObj& doc) {
+ BatchedDeleteDocument* deleteDoc = new BatchedDeleteDocument();
- ASSERT( status.isOK() );
- ASSERT_EQUALS( writeOp.getWriteState(), WriteOpState_Pending );
- ASSERT_EQUALS( targeted.size(), 1u );
- assertEndpointsEqual( targeted.front()->endpoint, endpoint );
+ string errMsg;
+ bool ok = deleteDoc->parseBSON(doc, &errMsg);
+ ASSERT_EQUALS(errMsg, "");
+ ASSERT(ok);
+ return deleteDoc;
+}
- writeOp.noteWriteComplete( *targeted.front() );
- ASSERT_EQUALS( writeOp.getWriteState(), WriteOpState_Completed );
+struct EndpointComp {
+ bool operator()(const TargetedWrite* writeA, const TargetedWrite* writeB) const {
+ return writeA->endpoint.shardName.compare(writeB->endpoint.shardName) < 0;
}
+};
- BatchedDeleteDocument* buildDeleteDoc( const BSONObj& doc ) {
+inline void sortByEndpoint(vector<TargetedWrite*>* writes) {
+ std::sort(writes->begin(), writes->end(), EndpointComp());
+}
- BatchedDeleteDocument* deleteDoc = new BatchedDeleteDocument();
+TEST(WriteOpTests, TargetMultiOneShard) {
+ //
+ // Multi-write targeting test where our query goes to one shard
+ //
- string errMsg;
- bool ok = deleteDoc->parseBSON( doc, &errMsg );
- ASSERT_EQUALS( errMsg, "" );
- ASSERT( ok );
- return deleteDoc;
- }
+ NamespaceString nss("foo.bar");
- struct EndpointComp {
- bool operator()( const TargetedWrite* writeA, const TargetedWrite* writeB ) const {
- return writeA->endpoint.shardName.compare( writeB->endpoint.shardName ) < 0;
- }
- };
+ ShardEndpoint endpointA("shardA", ChunkVersion(10, 0, OID()));
+ ShardEndpoint endpointB("shardB", ChunkVersion(20, 0, OID()));
+ ShardEndpoint endpointC("shardB", ChunkVersion(20, 0, OID()));
- inline void sortByEndpoint( vector<TargetedWrite*>* writes ) {
- std::sort( writes->begin(), writes->end(), EndpointComp() );
- }
+ vector<MockRange*> mockRanges;
+ mockRanges.push_back(new MockRange(endpointA, nss, BSON("x" << MINKEY), BSON("x" << 0)));
+ mockRanges.push_back(new MockRange(endpointB, nss, BSON("x" << 0), BSON("x" << 10)));
+ mockRanges.push_back(new MockRange(endpointC, nss, BSON("x" << 10), BSON("x" << MAXKEY)));
- TEST(WriteOpTests, TargetMultiOneShard) {
+ BatchedCommandRequest request(BatchedCommandRequest::BatchType_Delete);
+ request.setNS(nss.ns());
+ // Only hits first shard
+ BSONObj query = BSON("x" << GTE << -2 << LT << -1);
+ request.getDeleteRequest()->addToDeletes(buildDeleteDoc(BSON("q" << query)));
- //
- // Multi-write targeting test where our query goes to one shard
- //
+ WriteOp writeOp(BatchItemRef(&request, 0));
+ ASSERT_EQUALS(writeOp.getWriteState(), WriteOpState_Ready);
- NamespaceString nss( "foo.bar" );
+ MockNSTargeter targeter;
+ targeter.init(mockRanges);
- ShardEndpoint endpointA( "shardA", ChunkVersion(10, 0, OID()) );
- ShardEndpoint endpointB( "shardB", ChunkVersion(20, 0, OID()) );
- ShardEndpoint endpointC( "shardB", ChunkVersion(20, 0, OID()) );
+ OwnedPointerVector<TargetedWrite> targetedOwned;
+ vector<TargetedWrite*>& targeted = targetedOwned.mutableVector();
+ Status status = writeOp.targetWrites(targeter, &targeted);
- vector<MockRange*> mockRanges;
- mockRanges.push_back( new MockRange( endpointA,
- nss,
- BSON( "x" << MINKEY ),
- BSON( "x" << 0 ) ) );
- mockRanges.push_back( new MockRange( endpointB,
- nss,
- BSON( "x" << 0 ),
- BSON( "x" << 10 ) ) );
- mockRanges.push_back( new MockRange( endpointC,
- nss,
- BSON( "x" << 10 ),
- BSON( "x" << MAXKEY ) ) );
+ ASSERT(status.isOK());
+ ASSERT_EQUALS(writeOp.getWriteState(), WriteOpState_Pending);
+ ASSERT_EQUALS(targeted.size(), 1u);
+ assertEndpointsEqual(targeted.front()->endpoint, endpointA);
- BatchedCommandRequest request( BatchedCommandRequest::BatchType_Delete );
- request.setNS( nss.ns() );
- // Only hits first shard
- BSONObj query = BSON( "x" << GTE << -2 << LT << -1 );
- request.getDeleteRequest()->addToDeletes( buildDeleteDoc( BSON( "q" << query ) ) );
+ writeOp.noteWriteComplete(*targeted.front());
- WriteOp writeOp( BatchItemRef( &request, 0 ) );
- ASSERT_EQUALS( writeOp.getWriteState(), WriteOpState_Ready );
+ ASSERT_EQUALS(writeOp.getWriteState(), WriteOpState_Completed);
+}
- MockNSTargeter targeter;
- targeter.init( mockRanges );
+TEST(WriteOpTests, TargetMultiAllShards) {
+ //
+ // Multi-write targeting test where our write goes to more than one shard
+ //
- OwnedPointerVector<TargetedWrite> targetedOwned;
- vector<TargetedWrite*>& targeted = targetedOwned.mutableVector();
- Status status = writeOp.targetWrites( targeter, &targeted );
+ NamespaceString nss("foo.bar");
- ASSERT( status.isOK() );
- ASSERT_EQUALS( writeOp.getWriteState(), WriteOpState_Pending );
- ASSERT_EQUALS( targeted.size(), 1u );
- assertEndpointsEqual( targeted.front()->endpoint, endpointA );
+ ShardEndpoint endpointA("shardA", ChunkVersion(10, 0, OID()));
+ ShardEndpoint endpointB("shardB", ChunkVersion(20, 0, OID()));
+ ShardEndpoint endpointC("shardB", ChunkVersion(20, 0, OID()));
- writeOp.noteWriteComplete( *targeted.front() );
+ vector<MockRange*> mockRanges;
+ mockRanges.push_back(new MockRange(endpointA, nss, BSON("x" << MINKEY), BSON("x" << 0)));
+ mockRanges.push_back(new MockRange(endpointB, nss, BSON("x" << 0), BSON("x" << 10)));
+ mockRanges.push_back(new MockRange(endpointC, nss, BSON("x" << 10), BSON("x" << MAXKEY)));
- ASSERT_EQUALS( writeOp.getWriteState(), WriteOpState_Completed );
+ BatchedCommandRequest request(BatchedCommandRequest::BatchType_Delete);
+ request.setNS(nss.ns());
+ BSONObj query = BSON("x" << GTE << -1 << LT << 1);
+ request.getDeleteRequest()->addToDeletes(buildDeleteDoc(BSON("q" << query)));
- }
+ // Do multi-target write op
- TEST(WriteOpTests, TargetMultiAllShards) {
-
- //
- // Multi-write targeting test where our write goes to more than one shard
- //
-
- NamespaceString nss( "foo.bar" );
-
- ShardEndpoint endpointA( "shardA", ChunkVersion(10, 0, OID()) );
- ShardEndpoint endpointB( "shardB", ChunkVersion(20, 0, OID()) );
- ShardEndpoint endpointC( "shardB", ChunkVersion(20, 0, OID()) );
-
- vector<MockRange*> mockRanges;
- mockRanges.push_back( new MockRange( endpointA,
- nss,
- BSON( "x" << MINKEY ),
- BSON( "x" << 0 ) ) );
- mockRanges.push_back( new MockRange( endpointB,
- nss,
- BSON( "x" << 0 ),
- BSON( "x" << 10 ) ) );
- mockRanges.push_back( new MockRange( endpointC,
- nss,
- BSON( "x" << 10 ),
- BSON( "x" << MAXKEY ) ) );
-
- BatchedCommandRequest request( BatchedCommandRequest::BatchType_Delete );
- request.setNS( nss.ns() );
- BSONObj query = BSON( "x" << GTE << -1 << LT << 1 );
- request.getDeleteRequest()->addToDeletes( buildDeleteDoc( BSON( "q" << query ) ) );
-
- // Do multi-target write op
-
- WriteOp writeOp( BatchItemRef( &request, 0 ) );
- ASSERT_EQUALS( writeOp.getWriteState(), WriteOpState_Ready );
-
- MockNSTargeter targeter;
- targeter.init( mockRanges );
-
- OwnedPointerVector<TargetedWrite> targetedOwned;
- vector<TargetedWrite*>& targeted = targetedOwned.mutableVector();
- Status status = writeOp.targetWrites( targeter, &targeted );
-
- ASSERT( status.isOK() );
- ASSERT_EQUALS( writeOp.getWriteState(), WriteOpState_Pending );
- ASSERT_EQUALS( targeted.size(), 3u );
- sortByEndpoint( &targeted );
- ASSERT_EQUALS( targeted[0]->endpoint.shardName, endpointA.shardName );
- ASSERT( ChunkVersion::isIgnoredVersion( targeted[0]->endpoint.shardVersion ) );
- ASSERT_EQUALS( targeted[1]->endpoint.shardName, endpointB.shardName );
- ASSERT( ChunkVersion::isIgnoredVersion( targeted[1]->endpoint.shardVersion ) );
- ASSERT_EQUALS( targeted[2]->endpoint.shardName, endpointC.shardName );
- ASSERT( ChunkVersion::isIgnoredVersion( targeted[2]->endpoint.shardVersion ) );
-
- writeOp.noteWriteComplete( *targeted[0] );
- writeOp.noteWriteComplete( *targeted[1] );
- writeOp.noteWriteComplete( *targeted[2] );
-
- ASSERT_EQUALS( writeOp.getWriteState(), WriteOpState_Completed );
- }
+ WriteOp writeOp(BatchItemRef(&request, 0));
+ ASSERT_EQUALS(writeOp.getWriteState(), WriteOpState_Ready);
- TEST(WriteOpTests, ErrorSingle) {
+ MockNSTargeter targeter;
+ targeter.init(mockRanges);
- //
- // Single error after targeting test
- //
+ OwnedPointerVector<TargetedWrite> targetedOwned;
+ vector<TargetedWrite*>& targeted = targetedOwned.mutableVector();
+ Status status = writeOp.targetWrites(targeter, &targeted);
- NamespaceString nss( "foo.bar" );
+ ASSERT(status.isOK());
+ ASSERT_EQUALS(writeOp.getWriteState(), WriteOpState_Pending);
+ ASSERT_EQUALS(targeted.size(), 3u);
+ sortByEndpoint(&targeted);
+ ASSERT_EQUALS(targeted[0]->endpoint.shardName, endpointA.shardName);
+ ASSERT(ChunkVersion::isIgnoredVersion(targeted[0]->endpoint.shardVersion));
+ ASSERT_EQUALS(targeted[1]->endpoint.shardName, endpointB.shardName);
+ ASSERT(ChunkVersion::isIgnoredVersion(targeted[1]->endpoint.shardVersion));
+ ASSERT_EQUALS(targeted[2]->endpoint.shardName, endpointC.shardName);
+ ASSERT(ChunkVersion::isIgnoredVersion(targeted[2]->endpoint.shardVersion));
- ShardEndpoint endpoint( "shard", ChunkVersion::IGNORED() );
+ writeOp.noteWriteComplete(*targeted[0]);
+ writeOp.noteWriteComplete(*targeted[1]);
+ writeOp.noteWriteComplete(*targeted[2]);
- vector<MockRange*> mockRanges;
- mockRanges.push_back( new MockRange( endpoint,
- nss,
- BSON( "x" << MINKEY ),
- BSON( "x" << MAXKEY ) ) );
+ ASSERT_EQUALS(writeOp.getWriteState(), WriteOpState_Completed);
+}
- BatchedCommandRequest request( BatchedCommandRequest::BatchType_Insert );
- request.setNS( nss.ns() );
- request.getInsertRequest()->addToDocuments( BSON( "x" << 1 ) );
+TEST(WriteOpTests, ErrorSingle) {
+ //
+ // Single error after targeting test
+ //
- // Do single-target write op
+ NamespaceString nss("foo.bar");
- WriteOp writeOp( BatchItemRef( &request, 0 ) );
- ASSERT_EQUALS( writeOp.getWriteState(), WriteOpState_Ready );
+ ShardEndpoint endpoint("shard", ChunkVersion::IGNORED());
- MockNSTargeter targeter;
- targeter.init( mockRanges );
+ vector<MockRange*> mockRanges;
+ mockRanges.push_back(new MockRange(endpoint, nss, BSON("x" << MINKEY), BSON("x" << MAXKEY)));
- OwnedPointerVector<TargetedWrite> targetedOwned;
- vector<TargetedWrite*>& targeted = targetedOwned.mutableVector();
- Status status = writeOp.targetWrites( targeter, &targeted );
+ BatchedCommandRequest request(BatchedCommandRequest::BatchType_Insert);
+ request.setNS(nss.ns());
+ request.getInsertRequest()->addToDocuments(BSON("x" << 1));
- ASSERT( status.isOK() );
- ASSERT_EQUALS( writeOp.getWriteState(), WriteOpState_Pending );
- ASSERT_EQUALS( targeted.size(), 1u );
- assertEndpointsEqual( targeted.front()->endpoint, endpoint );
+ // Do single-target write op
- unique_ptr<WriteErrorDetail> error( buildError( ErrorCodes::UnknownError,
- BSON( "data" << 12345 ),
- "some message" ) );
+ WriteOp writeOp(BatchItemRef(&request, 0));
+ ASSERT_EQUALS(writeOp.getWriteState(), WriteOpState_Ready);
- writeOp.noteWriteError( *targeted.front(), *error );
+ MockNSTargeter targeter;
+ targeter.init(mockRanges);
- ASSERT_EQUALS( writeOp.getWriteState(), WriteOpState_Error );
- ASSERT_EQUALS( writeOp.getOpError().getErrCode(), error->getErrCode() );
- ASSERT_EQUALS( writeOp.getOpError().getErrInfo()["data"].Int(),
- error->getErrInfo()["data"].Int() );
- ASSERT_EQUALS( writeOp.getOpError().getErrMessage(), error->getErrMessage() );
- }
+ OwnedPointerVector<TargetedWrite> targetedOwned;
+ vector<TargetedWrite*>& targeted = targetedOwned.mutableVector();
+ Status status = writeOp.targetWrites(targeter, &targeted);
- TEST(WriteOpTests, CancelSingle) {
+ ASSERT(status.isOK());
+ ASSERT_EQUALS(writeOp.getWriteState(), WriteOpState_Pending);
+ ASSERT_EQUALS(targeted.size(), 1u);
+ assertEndpointsEqual(targeted.front()->endpoint, endpoint);
- //
- // Cancel single targeting test
- //
+ unique_ptr<WriteErrorDetail> error(
+ buildError(ErrorCodes::UnknownError, BSON("data" << 12345), "some message"));
- NamespaceString nss( "foo.bar" );
+ writeOp.noteWriteError(*targeted.front(), *error);
- ShardEndpoint endpoint( "shard", ChunkVersion::IGNORED() );
+ ASSERT_EQUALS(writeOp.getWriteState(), WriteOpState_Error);
+ ASSERT_EQUALS(writeOp.getOpError().getErrCode(), error->getErrCode());
+ ASSERT_EQUALS(writeOp.getOpError().getErrInfo()["data"].Int(),
+ error->getErrInfo()["data"].Int());
+ ASSERT_EQUALS(writeOp.getOpError().getErrMessage(), error->getErrMessage());
+}
- vector<MockRange*> mockRanges;
- mockRanges.push_back( new MockRange( endpoint,
- nss,
- BSON( "x" << MINKEY ),
- BSON( "x" << MAXKEY ) ) );
+TEST(WriteOpTests, CancelSingle) {
+ //
+ // Cancel single targeting test
+ //
- BatchedCommandRequest request( BatchedCommandRequest::BatchType_Insert );
- request.setNS( nss.ns() );
- request.getInsertRequest()->addToDocuments( BSON( "x" << 1 ) );
+ NamespaceString nss("foo.bar");
- // Do single-target write op
+ ShardEndpoint endpoint("shard", ChunkVersion::IGNORED());
- WriteOp writeOp( BatchItemRef( &request, 0 ) );
- ASSERT_EQUALS( writeOp.getWriteState(), WriteOpState_Ready );
+ vector<MockRange*> mockRanges;
+ mockRanges.push_back(new MockRange(endpoint, nss, BSON("x" << MINKEY), BSON("x" << MAXKEY)));
- MockNSTargeter targeter;
- targeter.init( mockRanges );
+ BatchedCommandRequest request(BatchedCommandRequest::BatchType_Insert);
+ request.setNS(nss.ns());
+ request.getInsertRequest()->addToDocuments(BSON("x" << 1));
- OwnedPointerVector<TargetedWrite> targetedOwned;
- vector<TargetedWrite*>& targeted = targetedOwned.mutableVector();
- Status status = writeOp.targetWrites( targeter, &targeted );
+ // Do single-target write op
- ASSERT( status.isOK() );
- ASSERT_EQUALS( writeOp.getWriteState(), WriteOpState_Pending );
- ASSERT_EQUALS( targeted.size(), 1u );
- assertEndpointsEqual( targeted.front()->endpoint, endpoint );
+ WriteOp writeOp(BatchItemRef(&request, 0));
+ ASSERT_EQUALS(writeOp.getWriteState(), WriteOpState_Ready);
- writeOp.cancelWrites( NULL );
+ MockNSTargeter targeter;
+ targeter.init(mockRanges);
- ASSERT_EQUALS( writeOp.getWriteState(), WriteOpState_Ready );
- }
+ OwnedPointerVector<TargetedWrite> targetedOwned;
+ vector<TargetedWrite*>& targeted = targetedOwned.mutableVector();
+ Status status = writeOp.targetWrites(targeter, &targeted);
- //
- // Test retryable errors
- //
+ ASSERT(status.isOK());
+ ASSERT_EQUALS(writeOp.getWriteState(), WriteOpState_Pending);
+ ASSERT_EQUALS(targeted.size(), 1u);
+ assertEndpointsEqual(targeted.front()->endpoint, endpoint);
+
+ writeOp.cancelWrites(NULL);
+
+ ASSERT_EQUALS(writeOp.getWriteState(), WriteOpState_Ready);
+}
- TEST(WriteOpTests, RetrySingleOp) {
+//
+// Test retryable errors
+//
- //
- // Retry single targeting test
- //
+TEST(WriteOpTests, RetrySingleOp) {
+ //
+ // Retry single targeting test
+ //
- NamespaceString nss( "foo.bar" );
+ NamespaceString nss("foo.bar");
- ShardEndpoint endpoint( "shard", ChunkVersion::IGNORED() );
+ ShardEndpoint endpoint("shard", ChunkVersion::IGNORED());
- vector<MockRange*> mockRanges;
- mockRanges.push_back( new MockRange( endpoint,
- nss,
- BSON( "x" << MINKEY ),
- BSON( "x" << MAXKEY ) ) );
+ vector<MockRange*> mockRanges;
+ mockRanges.push_back(new MockRange(endpoint, nss, BSON("x" << MINKEY), BSON("x" << MAXKEY)));
- BatchedCommandRequest request( BatchedCommandRequest::BatchType_Insert );
- request.setNS( nss.ns() );
- request.getInsertRequest()->addToDocuments( BSON( "x" << 1 ) );
+ BatchedCommandRequest request(BatchedCommandRequest::BatchType_Insert);
+ request.setNS(nss.ns());
+ request.getInsertRequest()->addToDocuments(BSON("x" << 1));
- // Do single-target write op
+ // Do single-target write op
- WriteOp writeOp( BatchItemRef( &request, 0 ) );
- ASSERT_EQUALS( writeOp.getWriteState(), WriteOpState_Ready );
+ WriteOp writeOp(BatchItemRef(&request, 0));
+ ASSERT_EQUALS(writeOp.getWriteState(), WriteOpState_Ready);
- MockNSTargeter targeter;
- targeter.init( mockRanges );
+ MockNSTargeter targeter;
+ targeter.init(mockRanges);
- OwnedPointerVector<TargetedWrite> targetedOwned;
- vector<TargetedWrite*>& targeted = targetedOwned.mutableVector();
- Status status = writeOp.targetWrites( targeter, &targeted );
+ OwnedPointerVector<TargetedWrite> targetedOwned;
+ vector<TargetedWrite*>& targeted = targetedOwned.mutableVector();
+ Status status = writeOp.targetWrites(targeter, &targeted);
- ASSERT( status.isOK() );
- ASSERT_EQUALS( writeOp.getWriteState(), WriteOpState_Pending );
- ASSERT_EQUALS( targeted.size(), 1u );
- assertEndpointsEqual( targeted.front()->endpoint, endpoint );
+ ASSERT(status.isOK());
+ ASSERT_EQUALS(writeOp.getWriteState(), WriteOpState_Pending);
+ ASSERT_EQUALS(targeted.size(), 1u);
+ assertEndpointsEqual(targeted.front()->endpoint, endpoint);
- // Stale exception
+ // Stale exception
- unique_ptr<WriteErrorDetail> error( buildError( ErrorCodes::StaleShardVersion,
- BSON( "data" << 12345 ),
- "some message" ) );
+ unique_ptr<WriteErrorDetail> error(
+ buildError(ErrorCodes::StaleShardVersion, BSON("data" << 12345), "some message"));
- writeOp.noteWriteError( *targeted.front(), *error );
+ writeOp.noteWriteError(*targeted.front(), *error);
- ASSERT_EQUALS( writeOp.getWriteState(), WriteOpState_Ready );
- }
+ ASSERT_EQUALS(writeOp.getWriteState(), WriteOpState_Ready);
+}
-} // unnamed namespace
+} // unnamed namespace