diff options
-rw-r--r-- | src/mongo/db/field_parser-inl.h | 75 | ||||
-rw-r--r-- | src/mongo/db/field_parser.h | 4 | ||||
-rw-r--r-- | src/mongo/db/s/cleanup_orphaned_cmd.cpp | 6 | ||||
-rw-r--r-- | src/mongo/db/s/merge_chunks_command.cpp | 4 | ||||
-rw-r--r-- | src/mongo/s/commands/cluster_merge_chunks_cmd.cpp | 8 | ||||
-rw-r--r-- | src/mongo/s/commands/cluster_split_cmd.cpp | 2 | ||||
-rw-r--r-- | src/mongo/s/write_ops/batch_write_exec.cpp | 19 | ||||
-rw-r--r-- | src/mongo/s/write_ops/batch_write_op.cpp | 140 | ||||
-rw-r--r-- | src/mongo/s/write_ops/batch_write_op.h | 28 | ||||
-rw-r--r-- | src/mongo/s/write_ops/batched_command_response.cpp | 55 | ||||
-rw-r--r-- | src/mongo/s/write_ops/batched_command_response.h | 3 | ||||
-rw-r--r-- | src/mongo/s/write_ops/batched_upsert_detail.cpp | 8 | ||||
-rw-r--r-- | src/mongo/s/write_ops/batched_upsert_detail.h | 4 |
13 files changed, 118 insertions, 238 deletions
diff --git a/src/mongo/db/field_parser-inl.h b/src/mongo/db/field_parser-inl.h index b121659f59f..47cdf09c808 100644 --- a/src/mongo/db/field_parser-inl.h +++ b/src/mongo/db/field_parser-inl.h @@ -30,8 +30,7 @@ #include "mongo/util/mongoutils/str.h" namespace mongo { - -using mongoutils::str::stream; +namespace { template <class T> void _genFieldErrMsg(const BSONElement& elem, @@ -40,10 +39,19 @@ void _genFieldErrMsg(const BSONElement& elem, std::string* errMsg) { if (!errMsg) return; - *errMsg = stream() << "wrong type for '" << field() << "' field, expected " << expected - << ", found " << elem.toString(); + *errMsg = str::stream() << "wrong type for '" << field() << "' field, expected " << expected + << ", found " << elem.toString(); +} + +template <typename T> +void _clearOwnedVector(std::vector<T*>* vec) { + for (typename std::vector<T*>::iterator it = vec->begin(); it != vec->end(); ++it) { + delete (*it); + } } +} // namespace + template <typename T> FieldParser::FieldState FieldParser::extract(BSONObj doc, const BSONField<T>& field, @@ -121,9 +129,9 @@ FieldParser::FieldState FieldParser::extract(BSONObj doc, if (elem.type() != Object && elem.type() != Array) { if (errMsg) { - *errMsg = stream() << "wrong type for '" << field() << "' field, expected " - << "vector or array" - << ", found " << doc[field.name()].toString(); + *errMsg = str::stream() << "wrong type for '" << field() << "' field, expected " + << "vector or array" + << ", found " << doc[field.name()].toString(); } return FIELD_INVALID; } @@ -176,8 +184,8 @@ FieldParser::FieldState FieldParser::extract(BSONElement elem, if (!FieldParser::extract(next, fieldFor, &out->at(initialSize + i), &elErrMsg)) { if (errMsg) { - *errMsg = stream() << "error parsing element " << i << " of field " << field() - << causedBy(elErrMsg); + *errMsg = str::stream() << "error parsing element " << i << " of field " + << field() << causedBy(elErrMsg); } return FIELD_INVALID; } @@ -188,9 +196,9 @@ FieldParser::FieldState 
FieldParser::extract(BSONElement elem, } if (errMsg) { - *errMsg = stream() << "wrong type for '" << field() << "' field, expected " - << "vector array" - << ", found " << elem.toString(); + *errMsg = str::stream() << "wrong type for '" << field() << "' field, expected " + << "vector array" + << ", found " << elem.toString(); } return FIELD_INVALID; } @@ -217,9 +225,9 @@ FieldParser::FieldState FieldParser::extract(BSONElement elem, std::string* errMsg) { if (elem.type() != Array) { if (errMsg) { - *errMsg = stream() << "wrong type for '" << field() << "' field, expected " - << "vector array" - << ", found " << elem.toString(); + *errMsg = str::stream() << "wrong type for '" << field() << "' field, expected " + << "vector array" + << ", found " << elem.toString(); } return FIELD_INVALID; } @@ -231,15 +239,15 @@ FieldParser::FieldState FieldParser::extract(BSONElement elem, if (next.type() != Object) { if (errMsg) { - *errMsg = stream() << "wrong type for '" << field() << "' field contents, " - << "expected object, found " << elem.type(); + *errMsg = str::stream() << "wrong type for '" << field() << "' field contents, " + << "expected object, found " << elem.type(); } return FIELD_INVALID; } std::unique_ptr<T> toInsert(new T); - if (!toInsert->parseBSON(next.embeddedObject(), errMsg) || !toInsert->isValid(errMsg)) { + if (!toInsert->parseBSON(next.embeddedObject(), errMsg)) { return FIELD_INVALID; } @@ -250,13 +258,6 @@ FieldParser::FieldState FieldParser::extract(BSONElement elem, } template <typename T> -void FieldParser::clearOwnedVector(std::vector<T*>* vec) { - for (typename std::vector<T*>::iterator it = vec->begin(); it != vec->end(); ++it) { - delete (*it); - } -} - -template <typename T> FieldParser::FieldState FieldParser::extract(BSONObj doc, const BSONField<std::vector<T*>>& field, std::vector<T*>** out, @@ -270,9 +271,9 @@ FieldParser::FieldState FieldParser::extract(BSONObj doc, if (elem.type() != Array) { if (errMsg) { - *errMsg = stream() << "wrong 
type for '" << field() << "' field, expected " - << "vector array" - << ", found " << doc[field.name()].toString(); + *errMsg = str::stream() << "wrong type for '" << field() << "' field, expected " + << "vector array" + << ", found " << doc[field.name()].toString(); } return FIELD_INVALID; } @@ -286,16 +287,16 @@ FieldParser::FieldState FieldParser::extract(BSONObj doc, if (next.type() != Object) { if (errMsg) { - *errMsg = stream() << "wrong type for '" << field() << "' field contents, " - << "expected object, found " << elem.type(); + *errMsg = str::stream() << "wrong type for '" << field() << "' field contents, " + << "expected object, found " << elem.type(); } - clearOwnedVector(tempVector.get()); + _clearOwnedVector(tempVector.get()); return FIELD_INVALID; } std::unique_ptr<T> toInsert(new T); if (!toInsert->parseBSON(next.embeddedObject(), errMsg)) { - clearOwnedVector(tempVector.get()); + _clearOwnedVector(tempVector.get()); return FIELD_INVALID; } @@ -341,8 +342,8 @@ FieldParser::FieldState FieldParser::extract(BSONElement elem, BSONField<T> fieldFor(next.fieldName(), value); if (!FieldParser::extract(next, fieldFor, &value, &elErrMsg)) { if (errMsg) { - *errMsg = stream() << "error parsing map element " << next.fieldName() - << " of field " << field() << causedBy(elErrMsg); + *errMsg = str::stream() << "error parsing map element " << next.fieldName() + << " of field " << field() << causedBy(elErrMsg); } return FIELD_INVALID; } @@ -352,9 +353,9 @@ FieldParser::FieldState FieldParser::extract(BSONElement elem, } if (errMsg) { - *errMsg = stream() << "wrong type for '" << field() << "' field, expected " - << "vector array" - << ", found " << elem.toString(); + *errMsg = str::stream() << "wrong type for '" << field() << "' field, expected " + << "vector array" + << ", found " << elem.toString(); } return FIELD_INVALID; } diff --git a/src/mongo/db/field_parser.h b/src/mongo/db/field_parser.h index 20840c57f2a..978f6ff7435 100644 --- 
a/src/mongo/db/field_parser.h +++ b/src/mongo/db/field_parser.h @@ -318,10 +318,6 @@ public: const BSONField<std::map<K, T>>& field, std::map<K, T>* out, std::string* errMsg = NULL); - -private: - template <typename T> - static void clearOwnedVector(std::vector<T*>* vec); }; } // namespace mongo diff --git a/src/mongo/db/s/cleanup_orphaned_cmd.cpp b/src/mongo/db/s/cleanup_orphaned_cmd.cpp index 99a8ee8fc10..623312b1cdd 100644 --- a/src/mongo/db/s/cleanup_orphaned_cmd.cpp +++ b/src/mongo/db/s/cleanup_orphaned_cmd.cpp @@ -88,9 +88,9 @@ CleanupResult cleanupOrphanedData(OperationContext* opCtx, BSONObj keyPattern = metadata->getKeyPattern(); if (!startingFromKey.isEmpty()) { if (!metadata->isValidKey(startingFromKey)) { - *errMsg = stream() << "could not cleanup orphaned data, start key " - << startingFromKey << " does not match shard key pattern " - << keyPattern; + *errMsg = str::stream() << "could not cleanup orphaned data, start key " + << startingFromKey << " does not match shard key pattern " + << keyPattern; log() << *errMsg; return CleanupResult_Error; diff --git a/src/mongo/db/s/merge_chunks_command.cpp b/src/mongo/db/s/merge_chunks_command.cpp index a589e580864..60bc1918abf 100644 --- a/src/mongo/db/s/merge_chunks_command.cpp +++ b/src/mongo/db/s/merge_chunks_command.cpp @@ -49,9 +49,9 @@ namespace mongo { using std::string; -using std::stringstream; using std::shared_ptr; using std::vector; +using str::stream; namespace { @@ -320,7 +320,7 @@ class MergeChunksCommand : public ErrmsgCommandDeprecated { public: MergeChunksCommand() : ErrmsgCommandDeprecated("mergeChunks") {} - void help(stringstream& h) const override { + void help(std::stringstream& h) const override { h << "Merge Chunks command\n" << "usage: { mergeChunks : <ns>, bounds : [ <min key>, <max key> ]," << " (opt) epoch : <epoch> }"; diff --git a/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp b/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp index b4bd8c04e9a..416bd0a12ee 100644 --- 
a/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp +++ b/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp @@ -141,10 +141,10 @@ public: if (!cm->getShardKeyPattern().isShardKey(minKey) || !cm->getShardKeyPattern().isShardKey(maxKey)) { - errmsg = stream() << "shard key bounds " - << "[" << minKey << "," << maxKey << ")" - << " are not valid for shard key pattern " - << cm->getShardKeyPattern().toBSON(); + errmsg = str::stream() << "shard key bounds " + << "[" << minKey << "," << maxKey << ")" + << " are not valid for shard key pattern " + << cm->getShardKeyPattern().toBSON(); return false; } diff --git a/src/mongo/s/commands/cluster_split_cmd.cpp b/src/mongo/s/commands/cluster_split_cmd.cpp index 5c7a4f469fb..325333852a4 100644 --- a/src/mongo/s/commands/cluster_split_cmd.cpp +++ b/src/mongo/s/commands/cluster_split_cmd.cpp @@ -196,7 +196,7 @@ public: BSONObj shardKey = uassertStatusOK(cm->getShardKeyPattern().extractShardKeyFromQuery(opCtx, find)); if (shardKey.isEmpty()) { - errmsg = stream() << "no shard key found in chunk query " << find; + errmsg = str::stream() << "no shard key found in chunk query " << find; return false; } diff --git a/src/mongo/s/write_ops/batch_write_exec.cpp b/src/mongo/s/write_ops/batch_write_exec.cpp index a58ff30da32..23f9cfb1f50 100644 --- a/src/mongo/s/write_ops/batch_write_exec.cpp +++ b/src/mongo/s/write_ops/batch_write_exec.cpp @@ -68,10 +68,10 @@ WriteErrorDetail errorFromStatus(const Status& status) { } // Helper to note several stale errors from a response -void noteStaleResponses(const std::vector<ShardError*>& staleErrors, NSTargeter* targeter) { - for (const auto error : staleErrors) { +void noteStaleResponses(const std::vector<ShardError>& staleErrors, NSTargeter* targeter) { + for (const auto& error : staleErrors) { targeter->noteStaleResponse( - error->endpoint, error->error.isErrInfoSet() ? error->error.getErrInfo() : BSONObj()); + error.endpoint, error.error.isErrInfoSet() ? 
error.error.getErrInfo() : BSONObj()); } } @@ -143,8 +143,9 @@ void BatchWriteExec::executeBatch(OperationContext* opCtx, // Send all child batches // + const size_t numToSend = childBatches.size(); size_t numSent = 0; - size_t numToSend = childBatches.size(); + while (numSent != numToSend) { // Collect batches out on the network, mapped by endpoint OwnedShardBatchMap ownedPendingBatches; @@ -165,11 +166,9 @@ void BatchWriteExec::executeBatch(OperationContext* opCtx, continue; // If we already have a batch for this shard, wait until the next time - ShardId targetShardId = nextBatch->getEndpoint().shardName; + const auto& targetShardId = nextBatch->getEndpoint().shardName; - OwnedShardBatchMap::MapType::iterator pendingIt = - pendingBatches.find(targetShardId); - if (pendingIt != pendingBatches.end()) + if (pendingBatches.count(targetShardId)) continue; const auto request = [&] { @@ -202,7 +201,7 @@ void BatchWriteExec::executeBatch(OperationContext* opCtx, childBatch.second = nullptr; // Recv-side is responsible for cleaning up the nextBatch when used - pendingBatches.insert(std::make_pair(targetShardId, nextBatch)); + pendingBatches.emplace(targetShardId, nextBatch); } AsyncRequestsSender ars(opCtx, @@ -274,7 +273,7 @@ void BatchWriteExec::executeBatch(OperationContext* opCtx, // Note if anything was stale const auto& staleErrors = trackedErrors.getErrors(ErrorCodes::StaleShardVersion); - if (staleErrors.size() > 0) { + if (!staleErrors.empty()) { noteStaleResponses(staleErrors, &targeter); ++stats->numStaleBatches; } diff --git a/src/mongo/s/write_ops/batch_write_op.cpp b/src/mongo/s/write_ops/batch_write_op.cpp index 4dad23a96a9..51db3c527c9 100644 --- a/src/mongo/s/write_ops/batch_write_op.cpp +++ b/src/mongo/s/write_ops/batch_write_op.cpp @@ -39,7 +39,6 @@ namespace mongo { using std::unique_ptr; -using std::make_pair; using std::set; using std::stringstream; using std::vector; @@ -51,13 +50,6 @@ namespace { // maximum key is 99999) + 1 byte (zero terminator) 
= 7 bytes const int kBSONArrayPerElementOverheadBytes = 7; -struct BatchSize { - int numOps{0}; - int sizeBytes{0}; -}; - -typedef std::map<const ShardEndpoint*, BatchSize, EndpointComp> TargetedBatchSizeMap; - struct WriteErrorDetailComp { bool operator()(const WriteErrorDetail* errorA, const WriteErrorDetail* errorB) const { return errorA->getIndex() < errorB->getIndex(); @@ -105,8 +97,7 @@ void buildTargetError(const Status& errStatus, WriteErrorDetail* details) { */ bool isNewBatchRequired(const std::vector<TargetedWrite*>& writes, const TargetedBatchMap& batchMap) { - for (vector<TargetedWrite*>::const_iterator it = writes.begin(); it != writes.end(); ++it) { - TargetedWrite* write = *it; + for (const auto write : writes) { if (batchMap.find(&write->endpoint) == batchMap.end()) { return true; } @@ -120,24 +111,22 @@ bool isNewBatchRequired(const std::vector<TargetedWrite*>& writes, */ bool wouldMakeBatchesTooBig(const std::vector<TargetedWrite*>& writes, int writeSizeBytes, - const TargetedBatchSizeMap& batchSizes) { - for (vector<TargetedWrite*>::const_iterator it = writes.begin(); it != writes.end(); ++it) { - const TargetedWrite* write = *it; - TargetedBatchSizeMap::const_iterator seenIt = batchSizes.find(&write->endpoint); - - if (seenIt == batchSizes.end()) { + const TargetedBatchMap& batchMap) { + for (const auto write : writes) { + TargetedBatchMap::const_iterator it = batchMap.find(&write->endpoint); + if (it == batchMap.end()) { // If this is the first item in the batch, it can't be too big continue; } - const BatchSize& batchSize = seenIt->second; + const auto& batch = it->second; - if (batchSize.numOps >= static_cast<int>(write_ops::kMaxWriteBatchSize)) { + if (batch->getNumOps() >= write_ops::kMaxWriteBatchSize) { // Too many items in batch return true; } - if (batchSize.sizeBytes + writeSizeBytes > BSONObjMaxUserSize) { + if (batch->getEstimatedSizeBytes() + writeSizeBytes > BSONObjMaxUserSize) { // Batch would be too big return true; } @@ -176,29 
+165,6 @@ int getWriteSizeBytes(const WriteOp& writeOp) { MONGO_UNREACHABLE; } -void cloneCommandErrorTo(const BatchedCommandResponse& batchResp, WriteErrorDetail* details) { - details->setErrCode(batchResp.getErrCode()); - details->setErrMessage(batchResp.getErrMessage()); -} - -void toWriteErrorResponse(const WriteErrorDetail& error, - bool ordered, - int numWrites, - BatchedCommandResponse* writeErrResponse) { - writeErrResponse->setOk(true); - writeErrResponse->setN(0); - - int numErrors = ordered ? 1 : numWrites; - for (int i = 0; i < numErrors; i++) { - unique_ptr<WriteErrorDetail> errorClone(new WriteErrorDetail); - error.cloneTo(errorClone.get()); - errorClone->setIndex(i); - writeErrResponse->addToErrDetails(errorClone.release()); - } - - dassert(writeErrResponse->isValid(NULL)); -} - /** * Given *either* a batch error or an array of per-item errors, copies errors we're interested in * into a TrackedErrorMap @@ -206,12 +172,9 @@ void toWriteErrorResponse(const WriteErrorDetail& error, void trackErrors(const ShardEndpoint& endpoint, const vector<WriteErrorDetail*> itemErrors, TrackedErrors* trackedErrors) { - for (vector<WriteErrorDetail*>::const_iterator it = itemErrors.begin(); it != itemErrors.end(); - ++it) { - const WriteErrorDetail* error = *it; - + for (const auto error : itemErrors) { if (trackedErrors->isTracking(error->getErrCode())) { - trackedErrors->addError(new ShardError(endpoint, *error)); + trackedErrors->addError(ShardError(endpoint, *error)); } } } @@ -271,7 +234,6 @@ Status BatchWriteOp::targetBatch(const NSTargeter& targeter, const bool ordered = _clientRequest.getWriteCommandBase().getOrdered(); TargetedBatchMap batchMap; - TargetedBatchSizeMap batchSizes; int numTargetErrors = 0; @@ -340,10 +302,9 @@ Status BatchWriteOp::targetBatch(const NSTargeter& targeter, const int writeSizeBytes = getWriteSizeBytes(writeOp) + kBSONArrayPerElementOverheadBytes + (_batchTxnNum ? 
kBSONArrayPerElementOverheadBytes + 4 : 0); - // If this write will push us over some sort of size limit, stop targeting - if (wouldMakeBatchesTooBig(writes, writeSizeBytes, batchSizes)) { + if (wouldMakeBatchesTooBig(writes, writeSizeBytes, batchMap)) { invariant(!batchMap.empty()); - writeOp.cancelWrites(NULL); + writeOp.cancelWrites(nullptr); break; } @@ -351,28 +312,15 @@ Status BatchWriteOp::targetBatch(const NSTargeter& targeter, // Targeting went ok, add to appropriate TargetedBatch // - for (vector<TargetedWrite*>::iterator it = writes.begin(); it != writes.end(); ++it) { - TargetedWrite* write = *it; - + for (const auto write : writes) { TargetedBatchMap::iterator batchIt = batchMap.find(&write->endpoint); - TargetedBatchSizeMap::iterator batchSizeIt = batchSizes.find(&write->endpoint); - if (batchIt == batchMap.end()) { TargetedWriteBatch* newBatch = new TargetedWriteBatch(write->endpoint); - batchIt = batchMap.insert(make_pair(&newBatch->getEndpoint(), newBatch)).first; - batchSizeIt = - batchSizes.insert(make_pair(&newBatch->getEndpoint(), BatchSize())).first; + batchIt = batchMap.emplace(&newBatch->getEndpoint(), newBatch).first; } TargetedWriteBatch* batch = batchIt->second; - BatchSize& batchSize = batchSizeIt->second; - - ++batchSize.numOps; - - // If the request contains transaction number, this means the end result will contain a - // statement ids array, so we need to account for that overhead. 
- batchSize.sizeBytes += writeSizeBytes; - batch->addWrite(write); + batch->addWrite(write, writeSizeBytes); } // Relinquish ownership of TargetedWrites, now the TargetedBatches own them @@ -402,7 +350,7 @@ Status BatchWriteOp::targetBatch(const NSTargeter& targeter, // Send the handle back to caller invariant(targetedBatches->find(batch->getEndpoint().shardName) == targetedBatches->end()); - targetedBatches->insert(std::make_pair(batch->getEndpoint().shardName, batch)); + targetedBatches->emplace(batch->getEndpoint().shardName, batch); } return Status::OK(); @@ -511,16 +459,16 @@ void BatchWriteOp::noteBatchResponse(const TargetedWriteBatch& targetedBatch, TrackedErrors* trackedErrors) { if (!response.getOk()) { WriteErrorDetail error; - cloneCommandErrorTo(response, &error); + error.setErrCode(response.getErrCode()); + error.setErrMessage(response.getErrMessage()); - // Treat command errors exactly like other failures of the batch - // Note that no errors will be tracked from these failures - as-designed + // Treat command errors exactly like other failures of the batch. + // + // Note that no errors will be tracked from these failures - as-designed. noteBatchError(targetedBatch, error); return; } - dassert(response.getOk()); - // Stop tracking targeted batch _targeted.erase(&targetedBatch); @@ -624,12 +572,21 @@ void BatchWriteOp::noteBatchError(const TargetedWriteBatch& targetedBatch, const WriteErrorDetail& error) { // Treat errors to get a batch response as failures of the contained writes BatchedCommandResponse emulatedResponse; - toWriteErrorResponse(error, - _clientRequest.getWriteCommandBase().getOrdered(), - targetedBatch.getWrites().size(), - &emulatedResponse); + emulatedResponse.setOk(true); + emulatedResponse.setN(0); + + const int numErrors = + _clientRequest.getWriteCommandBase().getOrdered() ? 
1 : targetedBatch.getWrites().size(); - noteBatchResponse(targetedBatch, emulatedResponse, NULL); + for (int i = 0; i < numErrors; i++) { + auto errorClone(stdx::make_unique<WriteErrorDetail>()); + error.cloneTo(errorClone.get()); + errorClone->setIndex(i); + emulatedResponse.addToErrDetails(errorClone.release()); + } + + dassert(emulatedResponse.isValid(nullptr)); + noteBatchResponse(targetedBatch, emulatedResponse, nullptr); } void BatchWriteOp::abortBatch(const WriteErrorDetail& error) { @@ -835,38 +792,23 @@ bool EndpointComp::operator()(const ShardEndpoint* endpointA, void TrackedErrors::startTracking(int errCode) { dassert(!isTracking(errCode)); - _errorMap.insert(make_pair(errCode, vector<ShardError*>())); + _errorMap.emplace(errCode, std::vector<ShardError>()); } bool TrackedErrors::isTracking(int errCode) const { - return _errorMap.find(errCode) != _errorMap.end(); + return _errorMap.count(errCode) != 0; } -void TrackedErrors::addError(ShardError* error) { - TrackedErrorMap::iterator seenIt = _errorMap.find(error->error.getErrCode()); +void TrackedErrors::addError(ShardError error) { + TrackedErrorMap::iterator seenIt = _errorMap.find(error.error.getErrCode()); if (seenIt == _errorMap.end()) return; - seenIt->second.push_back(error); + seenIt->second.emplace_back(std::move(error)); } -const vector<ShardError*>& TrackedErrors::getErrors(int errCode) const { +const std::vector<ShardError>& TrackedErrors::getErrors(int errCode) const { dassert(isTracking(errCode)); return _errorMap.find(errCode)->second; } -void TrackedErrors::clear() { - for (TrackedErrorMap::iterator it = _errorMap.begin(); it != _errorMap.end(); ++it) { - vector<ShardError*>& errors = it->second; - - for (vector<ShardError*>::iterator errIt = errors.begin(); errIt != errors.end(); ++errIt) { - delete *errIt; - } - errors.clear(); - } -} - -TrackedErrors::~TrackedErrors() { - clear(); -} - } // namespace mongo diff --git a/src/mongo/s/write_ops/batch_write_op.h 
b/src/mongo/s/write_ops/batch_write_op.h index 22d18b579ff..ea48b7639ed 100644 --- a/src/mongo/s/write_ops/batch_write_op.h +++ b/src/mongo/s/write_ops/batch_write_op.h @@ -28,6 +28,7 @@ #pragma once +#include <map> #include <set> #include <vector> @@ -247,11 +248,20 @@ public: return _writes.vector(); } + size_t getNumOps() const { + return _writes.size(); + } + + int getEstimatedSizeBytes() const { + return _estimatedSizeBytes; + } + /** - * TargetedWrite is owned here once given to the TargetedWriteBatch + * TargetedWrite is owned here once given to the TargetedWriteBatch. */ - void addWrite(TargetedWrite* targetedWrite) { + void addWrite(TargetedWrite* targetedWrite, int estWriteSize) { _writes.mutableVector().push_back(targetedWrite); + _estimatedSizeBytes += estWriteSize; } private: @@ -261,6 +271,10 @@ private: // Where the responses go // TargetedWrite*s are owned by the TargetedWriteBatch OwnedPointerVector<TargetedWrite> _writes; + + // Conservatively estimated size of the batch, for ensuring it doesn't grow past the maximum BSON + // size + int _estimatedSizeBytes{0}; }; /** @@ -268,20 +282,18 @@ private: */ class TrackedErrors { public: - ~TrackedErrors(); + TrackedErrors() = default; void startTracking(int errCode); bool isTracking(int errCode) const; - void addError(ShardError* error); - - const std::vector<ShardError*>& getErrors(int errCode) const; + void addError(ShardError error); - void clear(); + const std::vector<ShardError>& getErrors(int errCode) const; private: - typedef stdx::unordered_map<int, std::vector<ShardError*>> TrackedErrorMap; + using TrackedErrorMap = stdx::unordered_map<int, std::vector<ShardError>>; TrackedErrorMap _errorMap; }; diff --git a/src/mongo/s/write_ops/batched_command_response.cpp b/src/mongo/s/write_ops/batched_command_response.cpp index 26462777aa9..bc154183cac 100644 --- a/src/mongo/s/write_ops/batched_command_response.cpp +++ b/src/mongo/s/write_ops/batched_command_response.cpp @@ -302,61 +302,6 @@ void
BatchedCommandResponse::clear() { _wcErrDetails.reset(); } -void BatchedCommandResponse::cloneTo(BatchedCommandResponse* other) const { - other->clear(); - - other->_ok = _ok; - other->_isOkSet = _isOkSet; - - other->_errCode = _errCode; - other->_isErrCodeSet = _isErrCodeSet; - - other->_errMessage = _errMessage; - other->_isErrMessageSet = _isErrMessageSet; - - other->_nModified = _nModified; - other->_isNModifiedSet = _isNModifiedSet; - - other->_n = _n; - other->_isNSet = _isNSet; - - other->_singleUpserted = _singleUpserted; - other->_isSingleUpsertedSet = _isSingleUpsertedSet; - - other->unsetUpsertDetails(); - if (_upsertDetails.get()) { - for (std::vector<BatchedUpsertDetail*>::const_iterator it = _upsertDetails->begin(); - it != _upsertDetails->end(); - ++it) { - BatchedUpsertDetail* upsertDetailsItem = new BatchedUpsertDetail; - (*it)->cloneTo(upsertDetailsItem); - other->addToUpsertDetails(upsertDetailsItem); - } - } - - other->_lastOp = _lastOp; - other->_isLastOpSet = _isLastOpSet; - - other->_electionId = _electionId; - other->_isElectionIdSet = _isElectionIdSet; - - other->unsetErrDetails(); - if (_writeErrorDetails.get()) { - for (std::vector<WriteErrorDetail*>::const_iterator it = _writeErrorDetails->begin(); - it != _writeErrorDetails->end(); - ++it) { - WriteErrorDetail* errDetailsItem = new WriteErrorDetail; - (*it)->cloneTo(errDetailsItem); - other->addToErrDetails(errDetailsItem); - } - } - - if (_wcErrDetails.get()) { - other->_wcErrDetails.reset(new WriteConcernErrorDetail()); - _wcErrDetails->cloneTo(other->_wcErrDetails.get()); - } -} - std::string BatchedCommandResponse::toString() const { return toBSON().toString(); } diff --git a/src/mongo/s/write_ops/batched_command_response.h b/src/mongo/s/write_ops/batched_command_response.h index 49579da74a1..844dde4daa3 100644 --- a/src/mongo/s/write_ops/batched_command_response.h +++ b/src/mongo/s/write_ops/batched_command_response.h @@ -68,9 +68,6 @@ public: 
BatchedCommandResponse(BatchedCommandResponse&&) = default; BatchedCommandResponse& operator=(BatchedCommandResponse&&) = default; - /** Copies all the fields present in 'this' to 'other'. */ - void cloneTo(BatchedCommandResponse* other) const; - bool isValid(std::string* errMsg) const; BSONObj toBSON() const; bool parseBSON(const BSONObj& source, std::string* errMsg); diff --git a/src/mongo/s/write_ops/batched_upsert_detail.cpp b/src/mongo/s/write_ops/batched_upsert_detail.cpp index adbb6cf990c..e3656490daf 100644 --- a/src/mongo/s/write_ops/batched_upsert_detail.cpp +++ b/src/mongo/s/write_ops/batched_upsert_detail.cpp @@ -35,8 +35,6 @@ namespace mongo { using std::string; -using mongoutils::str::stream; - const BSONField<int> BatchedUpsertDetail::index("index"); const BSONField<BSONObj> BatchedUpsertDetail::upsertedID("_id"); @@ -44,8 +42,6 @@ BatchedUpsertDetail::BatchedUpsertDetail() { clear(); } -BatchedUpsertDetail::~BatchedUpsertDetail() {} - BSONObj BatchedUpsertDetail::toBSON() const { BSONObjBuilder builder; @@ -99,10 +95,6 @@ void BatchedUpsertDetail::cloneTo(BatchedUpsertDetail* other) const { other->_isUpsertedIDSet = _isUpsertedIDSet; } -std::string BatchedUpsertDetail::toString() const { - return "implement me"; -} - void BatchedUpsertDetail::setIndex(int index) { _index = index; _isIndexSet = true; diff --git a/src/mongo/s/write_ops/batched_upsert_detail.h b/src/mongo/s/write_ops/batched_upsert_detail.h index 9c19167dc98..7a9c18b6927 100644 --- a/src/mongo/s/write_ops/batched_upsert_detail.h +++ b/src/mongo/s/write_ops/batched_upsert_detail.h @@ -29,9 +29,7 @@ #pragma once #include <string> -#include <vector> -#include "mongo/base/string_data.h" #include "mongo/db/jsobj.h" namespace mongo { @@ -56,7 +54,6 @@ public: // BatchedUpsertDetail(); - ~BatchedUpsertDetail(); /** Copies all the fields present in 'this' to 'other'. 
*/ void cloneTo(BatchedUpsertDetail* other) const; @@ -68,7 +65,6 @@ public: BSONObj toBSON() const; bool parseBSON(const BSONObj& source, std::string* errMsg); void clear(); - std::string toString() const; // // individual field accessors |