author     Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2017-12-27 13:11:00 -0500
committer  Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2017-12-28 21:46:54 -0500
commit     10e3ce2d905ca3afc1e5949da2daa3353740171a (patch)
tree       0f532002ff244bac41b652daf6c2c1e783a3a25e
parent     800f84351f3f4f07aadf84b08be68f6be44fcfca (diff)
download   mongo-10e3ce2d905ca3afc1e5949da2daa3353740171a.tar.gz
SERVER-28992 Get rid of unused code from the write commands tests
-rw-r--r--  src/mongo/db/range_arithmetic.h                                      6
-rw-r--r--  src/mongo/db/range_arithmetic_test.cpp                               1
-rw-r--r--  src/mongo/db/s/balancer/balancer.cpp                                 2
-rw-r--r--  src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp    12
-rw-r--r--  src/mongo/db/s/balancer/migration_manager.cpp                        7
-rw-r--r--  src/mongo/db/s/chunk_splitter.cpp                                    2
-rw-r--r--  src/mongo/db/s/collection_metadata.h                                 5
-rw-r--r--  src/mongo/db/s/collection_sharding_state.cpp                         4
-rw-r--r--  src/mongo/db/s/get_shard_version_command.cpp                         2
-rw-r--r--  src/mongo/db/s/merge_chunks_command.cpp                              4
-rw-r--r--  src/mongo/db/s/metadata_manager_test.cpp                             3
-rw-r--r--  src/mongo/db/transaction_reaper.cpp                                  2
-rw-r--r--  src/mongo/s/catalog_cache.h                                          1
-rw-r--r--  src/mongo/s/commands/chunk_manager_targeter.cpp                     13
-rw-r--r--  src/mongo/s/commands/chunk_manager_targeter.h                       27
-rw-r--r--  src/mongo/s/commands/cluster_find_and_modify_cmd.cpp                 2
-rw-r--r--  src/mongo/s/commands/cluster_map_reduce_cmd.cpp                      7
-rw-r--r--  src/mongo/s/commands/cluster_merge_chunks_cmd.cpp                    6
-rw-r--r--  src/mongo/s/commands/cluster_move_chunk_cmd.cpp                     13
-rw-r--r--  src/mongo/s/commands/cluster_write_cmd.cpp                           5
-rw-r--r--  src/mongo/s/ns_targeter.h                                           35
-rw-r--r--  src/mongo/s/write_ops/batch_write_exec.h                             1
-rw-r--r--  src/mongo/s/write_ops/batch_write_exec_test.cpp                     25
-rw-r--r--  src/mongo/s/write_ops/batch_write_op.cpp                             1
-rw-r--r--  src/mongo/s/write_ops/batch_write_op.h                               1
-rw-r--r--  src/mongo/s/write_ops/batch_write_op_test.cpp                       19
-rw-r--r--  src/mongo/s/write_ops/mock_ns_targeter.h                           133
-rw-r--r--  src/mongo/s/write_ops/write_op.cpp                                  51
-rw-r--r--  src/mongo/s/write_ops/write_op_test.cpp                             53
29 files changed, 154 insertions(+), 289 deletions(-)
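
The largest part of this commit replaces the raw-pointer MockRange/MockNSTargeter test setup with value semantics: MockRange no longer carries a namespace or key pattern, and the namespace is passed to init() directly. As a reference for the test hunks below, here is a minimal sketch of the post-refactor setup; the namespace, shard name, and key pattern values are illustrative only, taken from the test fixtures in this commit:

    #include "mongo/s/write_ops/mock_ns_targeter.h"  // MockRange, MockNSTargeter

    // Target everything in foo.bar on a single mock shard, covering the full
    // shard key range [MinKey, MaxKey) on "x".
    NamespaceString nss("foo.bar");
    ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED());

    MockNSTargeter targeter;
    targeter.init(nss,
                  {MockRange(endpoint, BSON("x" << MINKEY), BSON("x" << MAXKEY))});

Multiple ranges can be passed in the same initializer list (as initTargeterSplitRange does in batch_write_op_test.cpp) to simulate a collection split across several shard endpoints.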
diff --git a/src/mongo/db/range_arithmetic.h b/src/mongo/db/range_arithmetic.h
index 91e35d8bd6f..bafcfeb44e1 100644
--- a/src/mongo/db/range_arithmetic.h
+++ b/src/mongo/db/range_arithmetic.h
@@ -28,12 +28,8 @@
#pragma once
-#include <map>
-#include <string>
-
+#include "mongo/bson/bsonobj.h"
#include "mongo/bson/simple_bsonobj_comparator.h"
-#include "mongo/db/jsobj.h"
-#include "mongo/s/chunk_version.h"
namespace mongo {
diff --git a/src/mongo/db/range_arithmetic_test.cpp b/src/mongo/db/range_arithmetic_test.cpp
index f4d5f3fb14f..7ee3e7450c8 100644
--- a/src/mongo/db/range_arithmetic_test.cpp
+++ b/src/mongo/db/range_arithmetic_test.cpp
@@ -28,6 +28,7 @@
#include "mongo/platform/basic.h"
+#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/db/range_arithmetic.h"
#include "mongo/unittest/unittest.h"
diff --git a/src/mongo/db/s/balancer/balancer.cpp b/src/mongo/db/s/balancer/balancer.cpp
index 036cb89cb75..99fed7b8737 100644
--- a/src/mongo/db/s/balancer/balancer.cpp
+++ b/src/mongo/db/s/balancer/balancer.cpp
@@ -623,7 +623,7 @@ void Balancer::_splitOrMarkJumbo(OperationContext* opCtx,
Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfoWithRefresh(opCtx, nss));
const auto cm = routingInfo.cm().get();
- auto chunk = cm->findIntersectingChunkWithSimpleCollation(minKey);
+ const auto chunk = cm->findIntersectingChunkWithSimpleCollation(minKey);
try {
const auto splitPoints = uassertStatusOK(shardutil::selectChunkSplitPoints(
diff --git a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
index 2bf7db8dc9a..1ebe571a210 100644
--- a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
+++ b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp
@@ -385,8 +385,7 @@ StatusWith<SplitInfoVector> BalancerChunkSelectionPolicyImpl::_getSplitCandidate
for (const auto& tagRangeEntry : distribution.tagRanges()) {
const auto& tagRange = tagRangeEntry.second;
- shared_ptr<Chunk> chunkAtZoneMin =
- cm->findIntersectingChunkWithSimpleCollation(tagRange.min);
+ const auto chunkAtZoneMin = cm->findIntersectingChunkWithSimpleCollation(tagRange.min);
invariant(chunkAtZoneMin->getMax().woCompare(tagRange.min) > 0);
if (chunkAtZoneMin->getMin().woCompare(tagRange.min)) {
@@ -397,8 +396,7 @@ StatusWith<SplitInfoVector> BalancerChunkSelectionPolicyImpl::_getSplitCandidate
if (!tagRange.max.woCompare(shardKeyPattern.globalMax()))
continue;
- shared_ptr<Chunk> chunkAtZoneMax =
- cm->findIntersectingChunkWithSimpleCollation(tagRange.max);
+ const auto chunkAtZoneMax = cm->findIntersectingChunkWithSimpleCollation(tagRange.max);
// We need to check that both the chunk's minKey does not match the zone's max and also that
// the max is not equal, which would only happen in the case of the zone ending in MaxKey.
@@ -436,8 +434,7 @@ StatusWith<MigrateInfoVector> BalancerChunkSelectionPolicyImpl::_getMigrateCandi
for (const auto& tagRangeEntry : distribution.tagRanges()) {
const auto& tagRange = tagRangeEntry.second;
- shared_ptr<Chunk> chunkAtZoneMin =
- cm->findIntersectingChunkWithSimpleCollation(tagRange.min);
+ const auto chunkAtZoneMin = cm->findIntersectingChunkWithSimpleCollation(tagRange.min);
if (chunkAtZoneMin->getMin().woCompare(tagRange.min)) {
return {ErrorCodes::IllegalOperation,
@@ -455,8 +452,7 @@ StatusWith<MigrateInfoVector> BalancerChunkSelectionPolicyImpl::_getMigrateCandi
if (!tagRange.max.woCompare(shardKeyPattern.globalMax()))
continue;
- shared_ptr<Chunk> chunkAtZoneMax =
- cm->findIntersectingChunkWithSimpleCollation(tagRange.max);
+ const auto chunkAtZoneMax = cm->findIntersectingChunkWithSimpleCollation(tagRange.max);
// We need to check that both the chunk's minKey does not match the zone's max and also that
// the max is not equal, which would only happen in the case of the zone ending in MaxKey.
diff --git a/src/mongo/db/s/balancer/migration_manager.cpp b/src/mongo/db/s/balancer/migration_manager.cpp
index cd2d1baa30e..91af176fa8b 100644
--- a/src/mongo/db/s/balancer/migration_manager.cpp
+++ b/src/mongo/db/s/balancer/migration_manager.cpp
@@ -190,8 +190,8 @@ Status MigrationManager::executeManualMigration(
auto& routingInfo = routingInfoStatus.getValue();
- auto chunk = routingInfo.cm()->findIntersectingChunkWithSimpleCollation(migrateInfo.minKey);
- invariant(chunk);
+ const auto chunk =
+ routingInfo.cm()->findIntersectingChunkWithSimpleCollation(migrateInfo.minKey);
Status commandStatus = _processRemoteCommandResponse(
remoteCommandResponse, &statusWithScopedMigrationRequest.getValue());
@@ -336,9 +336,8 @@ void MigrationManager::finishRecovery(OperationContext* opCtx,
auto waitForDelete = migrationType.getWaitForDelete();
migrateInfos.pop_front();
- auto chunk =
+ const auto chunk =
routingInfo.cm()->findIntersectingChunkWithSimpleCollation(migrationInfo.minKey);
- invariant(chunk);
if (chunk->getShardId() != migrationInfo.from) {
// Chunk is no longer on the source shard specified by this migration. Erase the
diff --git a/src/mongo/db/s/chunk_splitter.cpp b/src/mongo/db/s/chunk_splitter.cpp
index 8384048dd18..8c4e0a859e8 100644
--- a/src/mongo/db/s/chunk_splitter.cpp
+++ b/src/mongo/db/s/chunk_splitter.cpp
@@ -285,7 +285,7 @@ void ChunkSplitter::_runAutosplit(const NamespaceString& nss,
routingInfo.cm());
const auto cm = routingInfo.cm();
- auto chunk = cm->findIntersectingChunkWithSimpleCollation(min);
+ const auto chunk = cm->findIntersectingChunkWithSimpleCollation(min);
// Stop if chunk's range differs from the range we were expecting to split.
if ((0 != chunk->getMin().woCompare(min)) || (0 != chunk->getMax().woCompare(max)) ||
diff --git a/src/mongo/db/s/collection_metadata.h b/src/mongo/db/s/collection_metadata.h
index 38d5c11819f..fad4cfeae3d 100644
--- a/src/mongo/db/s/collection_metadata.h
+++ b/src/mongo/db/s/collection_metadata.h
@@ -29,15 +29,10 @@
#pragma once
#include "mongo/db/range_arithmetic.h"
-#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/chunk_manager.h"
-#include "mongo/s/chunk_version.h"
-#include "mongo/s/shard_key_pattern.h"
namespace mongo {
-class ChunkType;
-
/**
* The collection metadata has metadata information about a collection, in particular the
* sharding information. It's main goal in life is to be capable of answering if a certain
diff --git a/src/mongo/db/s/collection_sharding_state.cpp b/src/mongo/db/s/collection_sharding_state.cpp
index 2458d9861c5..a2495eb371f 100644
--- a/src/mongo/db/s/collection_sharding_state.cpp
+++ b/src/mongo/db/s/collection_sharding_state.cpp
@@ -602,8 +602,7 @@ uint64_t CollectionShardingState::_incrementChunkOnInsertOrUpdate(OperationConte
// Use the shard key to locate the chunk into which the document was updated, and increment the
// number of bytes tracked for the chunk. Note that we can assume the simple collation, because
// shard keys do not support non-simple collations.
- std::shared_ptr<Chunk> chunk = cm->findIntersectingChunkWithSimpleCollation(shardKey);
- invariant(chunk);
+ auto chunk = cm->findIntersectingChunkWithSimpleCollation(shardKey);
chunk->addBytesWritten(dataWritten);
// If the chunk becomes too large, then we call the ChunkSplitter to schedule a split. Then, we
@@ -612,6 +611,7 @@ uint64_t CollectionShardingState::_incrementChunkOnInsertOrUpdate(OperationConte
// TODO: call ChunkSplitter here
chunk->clearBytesWritten();
}
+
return chunk->getBytesWritten();
}
diff --git a/src/mongo/db/s/get_shard_version_command.cpp b/src/mongo/db/s/get_shard_version_command.cpp
index 7853b107279..7cdfa33f98c 100644
--- a/src/mongo/db/s/get_shard_version_command.cpp
+++ b/src/mongo/db/s/get_shard_version_command.cpp
@@ -54,7 +54,7 @@ public:
help << " example: { getShardVersion : 'alleyinsider.foo' } ";
}
- virtual bool supportsWriteConcern(const BSONObj& cmd) const override {
+ bool supportsWriteConcern(const BSONObj& cmd) const override {
return false;
}
diff --git a/src/mongo/db/s/merge_chunks_command.cpp b/src/mongo/db/s/merge_chunks_command.cpp
index 60bc1918abf..2b3818d5d07 100644
--- a/src/mongo/db/s/merge_chunks_command.cpp
+++ b/src/mongo/db/s/merge_chunks_command.cpp
@@ -49,7 +49,6 @@
namespace mongo {
using std::string;
-using std::shared_ptr;
using std::vector;
using str::stream;
@@ -347,7 +346,8 @@ public:
bool slaveOk() const override {
return false;
}
- virtual bool supportsWriteConcern(const BSONObj& cmd) const override {
+
+ bool supportsWriteConcern(const BSONObj& cmd) const override {
return false;
}
diff --git a/src/mongo/db/s/metadata_manager_test.cpp b/src/mongo/db/s/metadata_manager_test.cpp
index 2c09b55c69d..040d4638c70 100644
--- a/src/mongo/db/s/metadata_manager_test.cpp
+++ b/src/mongo/db/s/metadata_manager_test.cpp
@@ -114,7 +114,8 @@ protected:
invariant(!rangeMapOverlaps(metadata.getChunks(), minKey, maxKey));
auto cm = metadata.getChunkManager();
- auto chunkToSplit = cm->findIntersectingChunkWithSimpleCollation(minKey);
+
+ const auto chunkToSplit = cm->findIntersectingChunkWithSimpleCollation(minKey);
ASSERT(SimpleBSONObjComparator::kInstance.evaluate(maxKey <= chunkToSplit->getMax()))
<< "maxKey == " << maxKey
<< " and chunkToSplit->getMax() == " << chunkToSplit->getMax();
diff --git a/src/mongo/db/transaction_reaper.cpp b/src/mongo/db/transaction_reaper.cpp
index 9877fa4974d..21ce03f7c2a 100644
--- a/src/mongo/db/transaction_reaper.cpp
+++ b/src/mongo/db/transaction_reaper.cpp
@@ -235,7 +235,7 @@ public:
}
ShardId shardId;
if (_cm) {
- auto chunk = _cm->findIntersectingChunkWithSimpleCollation(lsid.toBSON());
+ const auto chunk = _cm->findIntersectingChunkWithSimpleCollation(lsid.toBSON());
shardId = chunk->getShardId();
} else {
shardId = _primary->getId();
diff --git a/src/mongo/s/catalog_cache.h b/src/mongo/s/catalog_cache.h
index 44abcaaac92..fc451628619 100644
--- a/src/mongo/s/catalog_cache.h
+++ b/src/mongo/s/catalog_cache.h
@@ -32,7 +32,6 @@
#include "mongo/base/string_data.h"
#include "mongo/s/catalog_cache_loader.h"
#include "mongo/s/chunk_manager.h"
-#include "mongo/s/chunk_version.h"
#include "mongo/s/client/shard.h"
#include "mongo/stdx/memory.h"
#include "mongo/stdx/mutex.h"
diff --git a/src/mongo/s/commands/chunk_manager_targeter.cpp b/src/mongo/s/commands/chunk_manager_targeter.cpp
index 7efc99785ec..4b5dae927d5 100644
--- a/src/mongo/s/commands/chunk_manager_targeter.cpp
+++ b/src/mongo/s/commands/chunk_manager_targeter.cpp
@@ -33,7 +33,6 @@
#include "mongo/s/commands/chunk_manager_targeter.h"
#include "mongo/db/matcher/extensions_callback_noop.h"
-#include "mongo/db/operation_context.h"
#include "mongo/db/query/canonical_query.h"
#include "mongo/db/query/collation/collation_index_key.h"
#include "mongo/s/client/shard_registry.h"
@@ -574,7 +573,7 @@ Status ChunkManagerTargeter::targetQuery(
std::unique_ptr<ShardEndpoint> ChunkManagerTargeter::targetShardKey(const BSONObj& shardKey,
const BSONObj& collation,
long long estDataSize) const {
- auto chunk = _routingInfo->cm()->findIntersectingChunk(shardKey, collation);
+ const auto chunk = _routingInfo->cm()->findIntersectingChunk(shardKey, collation);
// Track autosplit stats for sharded collections
// Note: this is only best effort accounting and is not accurate.
@@ -632,6 +631,11 @@ Status ChunkManagerTargeter::targetAllShards(
return Status::OK();
}
+void ChunkManagerTargeter::noteCouldNotTarget() {
+ dassert(_remoteShardVersions.empty());
+ _needsTargetingRefresh = true;
+}
+
void ChunkManagerTargeter::noteStaleResponse(const ShardEndpoint& endpoint,
const BSONObj& staleInfo) {
dassert(!_needsTargetingRefresh);
@@ -664,11 +668,6 @@ void ChunkManagerTargeter::noteStaleResponse(const ShardEndpoint& endpoint,
}
}
-void ChunkManagerTargeter::noteCouldNotTarget() {
- dassert(_remoteShardVersions.empty());
- _needsTargetingRefresh = true;
-}
-
Status ChunkManagerTargeter::refreshIfNeeded(OperationContext* opCtx, bool* wasChanged) {
bool dummy;
if (!wasChanged) {
diff --git a/src/mongo/s/commands/chunk_manager_targeter.h b/src/mongo/s/commands/chunk_manager_targeter.h
index 33aeb00bd68..2ff268871a4 100644
--- a/src/mongo/s/commands/chunk_manager_targeter.h
+++ b/src/mongo/s/commands/chunk_manager_targeter.h
@@ -29,30 +29,21 @@
#pragma once
#include <map>
-#include <memory>
#include "mongo/bson/bsonobj.h"
#include "mongo/bson/bsonobj_comparator_interface.h"
#include "mongo/bson/simple_bsonobj_comparator.h"
#include "mongo/db/namespace_string.h"
-#include "mongo/db/ops/write_ops.h"
#include "mongo/s/catalog_cache.h"
#include "mongo/s/ns_targeter.h"
namespace mongo {
-class ChunkManager;
-class OperationContext;
-class Shard;
-struct ChunkVersion;
-
struct TargeterStats {
- TargeterStats()
- : chunkSizeDelta(SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<int>()) {}
-
- // Map of chunk shard minKey -> approximate delta. This is used for deciding
- // whether a chunk might need splitting or not.
- BSONObjIndexedMap<int> chunkSizeDelta;
+ // Map of chunk shard minKey -> approximate delta. This is used for deciding whether a chunk
+ // might need splitting or not.
+ BSONObjIndexedMap<int> chunkSizeDelta{
+ SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<int>()};
};
/**
@@ -73,12 +64,12 @@ public:
*/
Status init(OperationContext* opCtx);
- const NamespaceString& getNS() const;
+ const NamespaceString& getNS() const override;
// Returns ShardKeyNotFound if document does not have a full shard key.
Status targetInsert(OperationContext* opCtx,
const BSONObj& doc,
- ShardEndpoint** endpoint) const;
+ ShardEndpoint** endpoint) const override;
// Returns ShardKeyNotFound if the update can't be targeted without a shard key.
Status targetUpdate(OperationContext* opCtx,
@@ -94,9 +85,9 @@ public:
Status targetAllShards(std::vector<std::unique_ptr<ShardEndpoint>>* endpoints) const override;
- void noteStaleResponse(const ShardEndpoint& endpoint, const BSONObj& staleInfo);
+ void noteCouldNotTarget() override;
- void noteCouldNotTarget();
+ void noteStaleResponse(const ShardEndpoint& endpoint, const BSONObj& staleInfo) override;
/**
* Replaces the targeting information with the latest information from the cache. If this
@@ -107,7 +98,7 @@ public:
*
* Also see NSTargeter::refreshIfNeeded().
*/
- Status refreshIfNeeded(OperationContext* opCtx, bool* wasChanged);
+ Status refreshIfNeeded(OperationContext* opCtx, bool* wasChanged) override;
private:
using ShardVersionMap = std::map<ShardId, ChunkVersion>;
diff --git a/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp b/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp
index 86d69329e80..a07a4ae5b88 100644
--- a/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp
+++ b/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp
@@ -171,7 +171,7 @@ public:
const BSONObj query = cmdObj.getObjectField("query");
const BSONObj collation = getCollation(cmdObj);
const BSONObj shardKey = getShardKey(opCtx, *chunkMgr, query);
- const auto chunk = chunkMgr->findIntersectingChunk(shardKey, collation);
+ auto chunk = chunkMgr->findIntersectingChunk(shardKey, collation);
_runCommand(opCtx,
chunk->getShardId(),
diff --git a/src/mongo/s/commands/cluster_map_reduce_cmd.cpp b/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
index 9467232a3cd..88ad5925067 100644
--- a/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
+++ b/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
@@ -579,12 +579,13 @@ public:
invariant(size < std::numeric_limits<int>::max());
// Key reported should be the chunk's minimum
- auto c = outputCM->findIntersectingChunkWithSimpleCollation(key);
- if (!c) {
+ auto chunkWritten = outputCM->findIntersectingChunkWithSimpleCollation(key);
+ if (!chunkWritten) {
warning() << "Mongod reported " << size << " bytes inserted for key " << key
<< " but can't find chunk";
} else {
- updateChunkWriteStatsAndSplitIfNeeded(opCtx, outputCM.get(), c.get(), size);
+ updateChunkWriteStatsAndSplitIfNeeded(
+ opCtx, outputCM.get(), chunkWritten.get(), size);
}
}
}
diff --git a/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp b/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp
index 416bd0a12ee..db51420bf21 100644
--- a/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp
+++ b/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp
@@ -44,9 +44,7 @@
namespace mongo {
-using std::shared_ptr;
using std::string;
-using std::stringstream;
using std::vector;
namespace {
@@ -58,7 +56,7 @@ class ClusterMergeChunksCommand : public ErrmsgCommandDeprecated {
public:
ClusterMergeChunksCommand() : ErrmsgCommandDeprecated("mergeChunks") {}
- void help(stringstream& h) const override {
+ void help(std::stringstream& h) const override {
h << "Merge Chunks command\n"
<< "usage: { mergeChunks : <ns>, bounds : [ <min key>, <max key> ] }";
}
@@ -151,7 +149,7 @@ public:
minKey = cm->getShardKeyPattern().normalizeShardKey(minKey);
maxKey = cm->getShardKeyPattern().normalizeShardKey(maxKey);
- shared_ptr<Chunk> firstChunk = cm->findIntersectingChunkWithSimpleCollation(minKey);
+ const auto firstChunk = cm->findIntersectingChunkWithSimpleCollation(minKey);
BSONObjBuilder remoteCmdObjB;
remoteCmdObjB.append(cmdObj[ClusterMergeChunksCommand::nsField()]);
diff --git a/src/mongo/s/commands/cluster_move_chunk_cmd.cpp b/src/mongo/s/commands/cluster_move_chunk_cmd.cpp
index 2e3b8fcca47..0af81436129 100644
--- a/src/mongo/s/commands/cluster_move_chunk_cmd.cpp
+++ b/src/mongo/s/commands/cluster_move_chunk_cmd.cpp
@@ -49,10 +49,6 @@
#include "mongo/util/timer.h"
namespace mongo {
-
-using std::shared_ptr;
-using std::string;
-
namespace {
class MoveChunkCmd : public ErrmsgCommandDeprecated {
@@ -121,9 +117,10 @@ public:
const auto toStatus = Grid::get(opCtx)->shardRegistry()->getShard(opCtx, toString);
if (!toStatus.isOK()) {
- string msg(str::stream() << "Could not move chunk in '" << nss.ns() << "' to shard '"
- << toString
- << "' because that shard does not exist");
+ std::string msg(str::stream() << "Could not move chunk in '" << nss.ns()
+ << "' to shard '"
+ << toString
+ << "' because that shard does not exist");
log() << msg;
return appendCommandStatus(result, Status(ErrorCodes::ShardNotFound, msg));
}
@@ -146,7 +143,7 @@ public:
return false;
}
- shared_ptr<Chunk> chunk;
+ std::shared_ptr<Chunk> chunk;
if (!find.isEmpty()) {
// find
diff --git a/src/mongo/s/commands/cluster_write_cmd.cpp b/src/mongo/s/commands/cluster_write_cmd.cpp
index 8cd9654e6ba..8f7581d402e 100644
--- a/src/mongo/s/commands/cluster_write_cmd.cpp
+++ b/src/mongo/s/commands/cluster_write_cmd.cpp
@@ -285,12 +285,13 @@ private:
targeter.targetUpdate(opCtx, targetingBatchItem.getUpdate(), &endpoints);
if (!status.isOK())
return status;
- } else {
- invariant(targetingBatchItem.getOpType() == BatchedCommandRequest::BatchType_Delete);
+ } else if (targetingBatchItem.getOpType() == BatchedCommandRequest::BatchType_Delete) {
Status status =
targeter.targetDelete(opCtx, targetingBatchItem.getDelete(), &endpoints);
if (!status.isOK())
return status;
+ } else {
+ MONGO_UNREACHABLE;
}
auto shardRegistry = Grid::get(opCtx)->shardRegistry();
diff --git a/src/mongo/s/ns_targeter.h b/src/mongo/s/ns_targeter.h
index aa92b458b88..e3a554cd252 100644
--- a/src/mongo/s/ns_targeter.h
+++ b/src/mongo/s/ns_targeter.h
@@ -28,22 +28,32 @@
#pragma once
-#include <memory>
-#include <string>
#include <vector>
#include "mongo/base/status.h"
#include "mongo/bson/bsonobj.h"
-#include "mongo/client/dbclientinterface.h"
#include "mongo/db/namespace_string.h"
+#include "mongo/db/ops/write_ops.h"
#include "mongo/s/chunk_version.h"
#include "mongo/s/shard_id.h"
-#include "mongo/s/write_ops/batched_command_request.h"
namespace mongo {
class OperationContext;
-struct ShardEndpoint;
+
+/**
+ * Combines a shard and the version which that shard should be using
+ */
+struct ShardEndpoint {
+ ShardEndpoint(const ShardId& shardName, const ChunkVersion& shardVersion)
+ : shardName(shardName), shardVersion(shardVersion) {}
+
+ ShardEndpoint(const ShardEndpoint& other)
+ : shardName(other.shardName), shardVersion(other.shardVersion) {}
+
+ ShardId shardName;
+ ChunkVersion shardVersion;
+};
/**
* The NSTargeter interface is used by a WriteOp to generate and target child write operations
@@ -153,19 +163,4 @@ public:
virtual Status refreshIfNeeded(OperationContext* opCtx, bool* wasChanged) = 0;
};
-/**
- * A ShardEndpoint represents a destination for a targeted query or document. It contains both
- * the logical target (shard name/version/broadcast) and the physical target (host name).
- */
-struct ShardEndpoint {
- ShardEndpoint(const ShardEndpoint& other)
- : shardName(other.shardName), shardVersion(other.shardVersion) {}
-
- ShardEndpoint(const ShardId& shardName, const ChunkVersion& shardVersion)
- : shardName(shardName), shardVersion(shardVersion) {}
-
- ShardId shardName;
- ChunkVersion shardVersion;
-};
-
} // namespace mongo
diff --git a/src/mongo/s/write_ops/batch_write_exec.h b/src/mongo/s/write_ops/batch_write_exec.h
index a5af42a10d0..38e83cb00f1 100644
--- a/src/mongo/s/write_ops/batch_write_exec.h
+++ b/src/mongo/s/write_ops/batch_write_exec.h
@@ -32,6 +32,7 @@
#include <string>
#include "mongo/bson/timestamp.h"
+#include "mongo/client/connection_string.h"
#include "mongo/db/repl/optime.h"
#include "mongo/s/ns_targeter.h"
#include "mongo/s/write_ops/batched_command_request.h"
diff --git a/src/mongo/s/write_ops/batch_write_exec_test.cpp b/src/mongo/s/write_ops/batch_write_exec_test.cpp
index eb32b7c8204..f5f457f1440 100644
--- a/src/mongo/s/write_ops/batch_write_exec_test.cpp
+++ b/src/mongo/s/write_ops/batch_write_exec_test.cpp
@@ -28,14 +28,13 @@
#include "mongo/platform/basic.h"
-#include "mongo/s/write_ops/batch_write_exec.h"
-
#include "mongo/client/remote_command_targeter_factory_mock.h"
#include "mongo/client/remote_command_targeter_mock.h"
#include "mongo/db/logical_session_id.h"
#include "mongo/s/catalog/type_shard.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/sharding_test_fixture.h"
+#include "mongo/s/write_ops/batch_write_exec.h"
#include "mongo/s/write_ops/batched_command_request.h"
#include "mongo/s/write_ops/batched_command_response.h"
#include "mongo/s/write_ops/mock_ns_targeter.h"
@@ -43,16 +42,11 @@
#include "mongo/unittest/unittest.h"
namespace mongo {
-
-using std::unique_ptr;
-using std::string;
-using std::vector;
-
namespace {
const HostAndPort kTestShardHost = HostAndPort("FakeHost", 12345);
const HostAndPort kTestConfigShardHost = HostAndPort("FakeConfigHost", 12345);
-const string shardName = "FakeShard";
+const std::string shardName = "FakeShard";
const int kMaxRoundsWithoutProgress = 5;
/**
@@ -87,11 +81,10 @@ public:
setupShards(shards);
// Set up the namespace targeter to target the fake shard.
- ShardEndpoint endpoint(shardName, ChunkVersion::IGNORED());
- vector<MockRange*> mockRanges;
- mockRanges.push_back(
- new MockRange(endpoint, nss, BSON("x" << MINKEY), BSON("x" << MAXKEY)));
- nsTargeter.init(mockRanges);
+ nsTargeter.init(nss,
+ {MockRange(ShardEndpoint(shardName, ChunkVersion::IGNORED()),
+ BSON("x" << MINKEY),
+ BSON("x" << MAXKEY))});
}
void expectInsertsReturnSuccess(const std::vector<BSONObj>& expected) {
@@ -290,7 +283,7 @@ TEST_F(BatchWriteExecTest, SingleOpError) {
ASSERT(response.isErrDetailsSet());
ASSERT_EQ(errResponse.getErrCode(), response.getErrDetailsAt(0)->getErrCode());
ASSERT(response.getErrDetailsAt(0)->getErrMessage().find(errResponse.getErrMessage()) !=
- string::npos);
+ std::string::npos);
ASSERT_EQ(1, stats.numRounds);
});
@@ -479,7 +472,7 @@ TEST_F(BatchWriteExecTest, RetryableErrorNoTxnNumber) {
ASSERT(response.isErrDetailsSet());
ASSERT_EQUALS(response.getErrDetailsAt(0)->getErrCode(), retryableErrResponse.getErrCode());
ASSERT(response.getErrDetailsAt(0)->getErrMessage().find(
- retryableErrResponse.getErrMessage()) != string::npos);
+ retryableErrResponse.getErrMessage()) != std::string::npos);
ASSERT_EQ(1, stats.numRounds);
});
@@ -561,7 +554,7 @@ TEST_F(BatchWriteExecTest, NonRetryableErrorTxnNumber) {
ASSERT_EQUALS(response.getErrDetailsAt(0)->getErrCode(),
nonRetryableErrResponse.getErrCode());
ASSERT(response.getErrDetailsAt(0)->getErrMessage().find(
- nonRetryableErrResponse.getErrMessage()) != string::npos);
+ nonRetryableErrResponse.getErrMessage()) != std::string::npos);
ASSERT_EQ(1, stats.numRounds);
});
diff --git a/src/mongo/s/write_ops/batch_write_op.cpp b/src/mongo/s/write_ops/batch_write_op.cpp
index 51db3c527c9..3feab353cf1 100644
--- a/src/mongo/s/write_ops/batch_write_op.cpp
+++ b/src/mongo/s/write_ops/batch_write_op.cpp
@@ -33,6 +33,7 @@
#include <numeric>
#include "mongo/base/error_codes.h"
+#include "mongo/db/operation_context.h"
#include "mongo/stdx/memory.h"
#include "mongo/util/transitional_tools_do_not_use/vector_spooling.h"
diff --git a/src/mongo/s/write_ops/batch_write_op.h b/src/mongo/s/write_ops/batch_write_op.h
index ea48b7639ed..2b91136d5cd 100644
--- a/src/mongo/s/write_ops/batch_write_op.h
+++ b/src/mongo/s/write_ops/batch_write_op.h
@@ -35,6 +35,7 @@
#include "mongo/base/disallow_copying.h"
#include "mongo/base/owned_pointer_vector.h"
#include "mongo/base/status.h"
+#include "mongo/db/logical_session_id.h"
#include "mongo/platform/unordered_map.h"
#include "mongo/rpc/write_concern_error_detail.h"
#include "mongo/s/ns_targeter.h"
diff --git a/src/mongo/s/write_ops/batch_write_op_test.cpp b/src/mongo/s/write_ops/batch_write_op_test.cpp
index 13bada61f34..e7839b70fa9 100644
--- a/src/mongo/s/write_ops/batch_write_op_test.cpp
+++ b/src/mongo/s/write_ops/batch_write_op_test.cpp
@@ -42,30 +42,23 @@ namespace {
void initTargeterFullRange(const NamespaceString& nss,
const ShardEndpoint& endpoint,
MockNSTargeter* targeter) {
- std::vector<MockRange*> mockRanges;
- mockRanges.push_back(new MockRange(endpoint, nss, BSON("x" << MINKEY), BSON("x" << MAXKEY)));
- targeter->init(mockRanges);
+ targeter->init(nss, {MockRange(endpoint, BSON("x" << MINKEY), BSON("x" << MAXKEY))});
}
void initTargeterSplitRange(const NamespaceString& nss,
const ShardEndpoint& endpointA,
const ShardEndpoint& endpointB,
MockNSTargeter* targeter) {
- std::vector<MockRange*> mockRanges;
- mockRanges.push_back(new MockRange(endpointA, nss, BSON("x" << MINKEY), BSON("x" << 0)));
- mockRanges.push_back(new MockRange(endpointB, nss, BSON("x" << 0), BSON("x" << MAXKEY)));
- targeter->init(mockRanges);
+ targeter->init(nss,
+ {MockRange(endpointA, BSON("x" << MINKEY), BSON("x" << 0)),
+ MockRange(endpointB, BSON("x" << 0), BSON("x" << MAXKEY))});
}
void initTargeterHalfRange(const NamespaceString& nss,
const ShardEndpoint& endpoint,
MockNSTargeter* targeter) {
- std::vector<MockRange*> mockRanges;
- mockRanges.push_back(new MockRange(endpoint, nss, BSON("x" << MINKEY), BSON("x" << 0)));
-
- // x >= 0 values untargetable
-
- targeter->init(mockRanges);
+ // x >= 0 values are untargetable
+ targeter->init(nss, {MockRange(endpoint, BSON("x" << MINKEY), BSON("x" << 0))});
}
write_ops::DeleteOpEntry buildDelete(const BSONObj& query, bool multi) {
diff --git a/src/mongo/s/write_ops/mock_ns_targeter.h b/src/mongo/s/write_ops/mock_ns_targeter.h
index 213a5abcb6a..89ac1e64ce5 100644
--- a/src/mongo/s/write_ops/mock_ns_targeter.h
+++ b/src/mongo/s/write_ops/mock_ns_targeter.h
@@ -28,10 +28,8 @@
#pragma once
-#include "mongo/base/owned_pointer_vector.h"
#include "mongo/bson/bsonobj.h"
#include "mongo/bson/bsonobjbuilder.h"
-#include "mongo/db/range_arithmetic.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/ns_targeter.h"
#include "mongo/stdx/memory.h"
@@ -40,68 +38,15 @@
namespace mongo {
/**
- * A KeyRange represents a range over keys of documents in a namespace, qualified by a
- * key pattern which defines the documents that are in the key range.
- *
- * There may be many different expressions to generate the same key fields from a document - the
- * keyPattern tells us these expressions.
- *
- * Ex:
- * DocA : { field : "aaaa" }
- * DocB : { field : "bbb" }
- * DocC : { field : "ccccc" }
- *
- * keyPattern : { field : 1 }
- * minKey : { field : "aaaa" } : Id(DocA)
- * maxKey : { field : "ccccc" } : Id(DocB)
- *
- * contains Id(DocB)
- *
- * keyPattern : { field : "numberofletters" }
- * minKey : { field : 4 } : numberofletters(DocA)
- * maxKey : { field : 5 } : numberofletters(DocC)
- *
- * does not contain numberofletters(DocB)
- */
-struct KeyRange {
- KeyRange(const std::string& ns,
- const BSONObj& minKey,
- const BSONObj& maxKey,
- const BSONObj& keyPattern)
- : ns(ns), minKey(minKey), maxKey(maxKey), keyPattern(keyPattern) {}
-
- KeyRange() {}
-
- std::string ns;
- BSONObj minKey;
- BSONObj maxKey;
- BSONObj keyPattern;
-};
-
-/**
* A MockRange represents a range with endpoint that a MockNSTargeter uses to direct writes to
* a particular endpoint.
*/
struct MockRange {
- MockRange(const ShardEndpoint& endpoint,
- const NamespaceString nss,
- const BSONObj& minKey,
- const BSONObj& maxKey)
- : endpoint(endpoint), range(nss.ns(), minKey, maxKey, getKeyPattern(minKey)) {}
-
- MockRange(const ShardEndpoint& endpoint, const KeyRange& range)
- : endpoint(endpoint), range(range) {}
-
- static BSONObj getKeyPattern(const BSONObj& key) {
- BSONObjIterator it(key);
- BSONObjBuilder objB;
- while (it.more())
- objB.append(it.next().fieldName(), 1);
- return objB.obj();
- }
+ MockRange(const ShardEndpoint& endpoint, const BSONObj& minKey, const BSONObj& maxKey)
+ : endpoint(endpoint), range(minKey, maxKey) {}
const ShardEndpoint endpoint;
- const KeyRange range;
+ const ChunkRange range;
};
/**
@@ -112,11 +57,12 @@ struct MockRange {
*/
class MockNSTargeter : public NSTargeter {
public:
- void init(const std::vector<MockRange*> mockRanges) {
+ void init(const NamespaceString& nss, std::vector<MockRange> mockRanges) {
+ ASSERT(nss.isValid());
+ _nss = nss;
+
ASSERT(!mockRanges.empty());
- _mockRanges.mutableVector().insert(
- _mockRanges.mutableVector().end(), mockRanges.begin(), mockRanges.end());
- _nss = NamespaceString(_mockRanges.vector().front()->range.ns);
+ _mockRanges = std::move(mockRanges);
}
const NamespaceString& getNS() const {
@@ -130,7 +76,7 @@ public:
const BSONObj& doc,
ShardEndpoint** endpoint) const override {
std::vector<std::unique_ptr<ShardEndpoint>> endpoints;
- Status status = targetQuery(doc, &endpoints);
+ Status status = _targetQuery(doc, &endpoints);
if (!status.isOK())
return status;
if (!endpoints.empty())
@@ -145,7 +91,7 @@ public:
Status targetUpdate(OperationContext* opCtx,
const write_ops::UpdateOpEntry& updateDoc,
std::vector<std::unique_ptr<ShardEndpoint>>* endpoints) const override {
- return targetQuery(updateDoc.getQ(), endpoints);
+ return _targetQuery(updateDoc.getQ(), endpoints);
}
/**
@@ -155,52 +101,44 @@ public:
Status targetDelete(OperationContext* opCtx,
const write_ops::DeleteOpEntry& deleteDoc,
std::vector<std::unique_ptr<ShardEndpoint>>* endpoints) const {
- return targetQuery(deleteDoc.getQ(), endpoints);
+ return _targetQuery(deleteDoc.getQ(), endpoints);
}
Status targetCollection(std::vector<std::unique_ptr<ShardEndpoint>>* endpoints) const override {
- // TODO: XXX
// No-op
return Status::OK();
}
Status targetAllShards(std::vector<std::unique_ptr<ShardEndpoint>>* endpoints) const override {
- const std::vector<MockRange*>& ranges = getRanges();
- for (std::vector<MockRange*>::const_iterator it = ranges.begin(); it != ranges.end();
- ++it) {
- const MockRange* range = *it;
- endpoints->push_back(stdx::make_unique<ShardEndpoint>(range->endpoint));
+ for (const auto& range : _mockRanges) {
+ endpoints->push_back(stdx::make_unique<ShardEndpoint>(range.endpoint));
}
return Status::OK();
}
- void noteCouldNotTarget() {
+ void noteCouldNotTarget() override {
// No-op
}
- void noteStaleResponse(const ShardEndpoint& endpoint, const BSONObj& staleInfo) {
+ void noteStaleResponse(const ShardEndpoint& endpoint, const BSONObj& staleInfo) override {
// No-op
}
- Status refreshIfNeeded(OperationContext* opCtx, bool* wasChanged) {
+ Status refreshIfNeeded(OperationContext* opCtx, bool* wasChanged) override {
// No-op
if (wasChanged)
*wasChanged = false;
return Status::OK();
}
- const std::vector<MockRange*>& getRanges() const {
- return _mockRanges.vector();
- }
-
private:
- ChunkRange parseRange(const BSONObj& query) const {
- const std::string fieldName = query.firstElement().fieldName();
+ static ChunkRange _parseRange(const BSONObj& query) {
+ const StringData fieldName(query.firstElement().fieldName());
if (query.firstElement().isNumber()) {
- return ChunkRange(BSON(fieldName << query.firstElement().numberInt()),
- BSON(fieldName << query.firstElement().numberInt() + 1));
+ return {BSON(fieldName << query.firstElement().numberInt()),
+ BSON(fieldName << query.firstElement().numberInt() + 1)};
} else if (query.firstElement().type() == Object) {
BSONObj queryRange = query.firstElement().Obj();
@@ -212,43 +150,36 @@ private:
BSONObjBuilder maxKeyB;
maxKeyB.appendAs(queryRange[LT.l_], fieldName);
- return ChunkRange(minKeyB.obj(), maxKeyB.obj());
+ return {minKeyB.obj(), maxKeyB.obj()};
}
- ASSERT(false);
- return ChunkRange({}, {});
+ FAIL("Invalid query");
+ MONGO_UNREACHABLE;
}
/**
* Returns the first ShardEndpoint for the query from the mock ranges. Only can handle
* queries of the form { field : { $gte : <value>, $lt : <value> } }.
*/
- Status targetQuery(const BSONObj& query,
- std::vector<std::unique_ptr<ShardEndpoint>>* endpoints) const {
- ChunkRange queryRange(parseRange(query));
-
- const std::vector<MockRange*>& ranges = getRanges();
- for (std::vector<MockRange*>::const_iterator it = ranges.begin(); it != ranges.end();
- ++it) {
- const MockRange* range = *it;
+ Status _targetQuery(const BSONObj& query,
+ std::vector<std::unique_ptr<ShardEndpoint>>* endpoints) const {
+ ChunkRange queryRange(_parseRange(query));
- if (rangeOverlaps(queryRange.getMin(),
- queryRange.getMax(),
- range->range.minKey,
- range->range.maxKey)) {
- endpoints->push_back(stdx::make_unique<ShardEndpoint>(range->endpoint));
+ for (const auto& range : _mockRanges) {
+ if (queryRange.overlapWith(range.range)) {
+ endpoints->push_back(stdx::make_unique<ShardEndpoint>(range.endpoint));
}
}
if (endpoints->empty())
- return Status(ErrorCodes::UnknownError, "no mock ranges found for query");
+ return {ErrorCodes::UnknownError, "no mock ranges found for query"};
+
return Status::OK();
}
NamespaceString _nss;
- // Manually-stored ranges
- OwnedPointerVector<MockRange> _mockRanges;
+ std::vector<MockRange> _mockRanges;
};
inline void assertEndpointsEqual(const ShardEndpoint& endpointA, const ShardEndpoint& endpointB) {
diff --git a/src/mongo/s/write_ops/write_op.cpp b/src/mongo/s/write_ops/write_op.cpp
index 177720c77f5..2d77ed4b56b 100644
--- a/src/mongo/s/write_ops/write_op.cpp
+++ b/src/mongo/s/write_ops/write_op.cpp
@@ -53,46 +53,39 @@ const WriteErrorDetail& WriteOp::getOpError() const {
Status WriteOp::targetWrites(OperationContext* opCtx,
const NSTargeter& targeter,
std::vector<TargetedWrite*>* targetedWrites) {
- bool isUpdate = _itemRef.getOpType() == BatchedCommandRequest::BatchType_Update;
- bool isDelete = _itemRef.getOpType() == BatchedCommandRequest::BatchType_Delete;
- bool isIndexInsert = _itemRef.getRequest()->isInsertIndexRequest();
+ const bool isIndexInsert = _itemRef.getRequest()->isInsertIndexRequest();
Status targetStatus = Status::OK();
std::vector<std::unique_ptr<ShardEndpoint>> endpoints;
- if (isUpdate) {
- targetStatus = targeter.targetUpdate(opCtx, _itemRef.getUpdate(), &endpoints);
- } else if (isDelete) {
- targetStatus = targeter.targetDelete(opCtx, _itemRef.getDelete(), &endpoints);
- } else {
- dassert(_itemRef.getOpType() == BatchedCommandRequest::BatchType_Insert);
-
- ShardEndpoint* endpoint = NULL;
- // TODO: Remove the index targeting stuff once there is a command for it
- if (!isIndexInsert) {
- targetStatus = targeter.targetInsert(opCtx, _itemRef.getDocument(), &endpoint);
- } else {
+ if (_itemRef.getOpType() == BatchedCommandRequest::BatchType_Insert) {
+ if (isIndexInsert) {
+ // TODO: Remove the index targeting stuff once there is a command for it?
// TODO: Retry index writes with stale version?
targetStatus = targeter.targetCollection(&endpoints);
+ } else {
+ ShardEndpoint* endpoint = nullptr;
+ targetStatus = targeter.targetInsert(opCtx, _itemRef.getDocument(), &endpoint);
+ if (targetStatus.isOK()) {
+ // Store single endpoint result if we targeted a single endpoint
+ endpoints.push_back(std::unique_ptr<ShardEndpoint>{endpoint});
+ }
}
-
- if (!targetStatus.isOK()) {
- dassert(NULL == endpoint);
- return targetStatus;
- }
-
- // Store single endpoint result if we targeted a single endpoint
- if (endpoint)
- endpoints.push_back(std::unique_ptr<ShardEndpoint>{endpoint});
+ } else if (_itemRef.getOpType() == BatchedCommandRequest::BatchType_Update) {
+ targetStatus = targeter.targetUpdate(opCtx, _itemRef.getUpdate(), &endpoints);
+ } else if (_itemRef.getOpType() == BatchedCommandRequest::BatchType_Delete) {
+ targetStatus = targeter.targetDelete(opCtx, _itemRef.getDelete(), &endpoints);
+ } else {
+ MONGO_UNREACHABLE;
}
- // If we're targeting more than one endpoint with an update/delete, we have to target
- // everywhere since we cannot currently retry partial results.
- // NOTE: Index inserts are currently specially targeted only at the current collection to
- // avoid creating collections everywhere.
+ // If we're targeting more than one endpoint with an update/delete, we have to target everywhere
+ // since we cannot currently retry partial results.
+ //
+ // NOTE: Index inserts are currently specially targeted only at the current collection to avoid
+ // creating collections everywhere.
if (targetStatus.isOK() && endpoints.size() > 1u && !isIndexInsert) {
endpoints.clear();
- invariant(endpoints.empty());
targetStatus = targeter.targetAllShards(&endpoints);
}
diff --git a/src/mongo/s/write_ops/write_op_test.cpp b/src/mongo/s/write_ops/write_op_test.cpp
index 952d34b2b4b..1e098a22169 100644
--- a/src/mongo/s/write_ops/write_op_test.cpp
+++ b/src/mongo/s/write_ops/write_op_test.cpp
@@ -28,7 +28,6 @@
#include "mongo/platform/basic.h"
-#include "mongo/base/error_codes.h"
#include "mongo/base/owned_pointer_vector.h"
#include "mongo/db/operation_context_noop.h"
#include "mongo/s/write_ops/batched_command_request.h"
@@ -89,13 +88,10 @@ TEST(WriteOpTests, BasicError) {
TEST(WriteOpTests, TargetSingle) {
OperationContextNoop opCtx;
- NamespaceString nss("foo.bar");
+ NamespaceString nss("foo.bar");
ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED());
- std::vector<MockRange*> mockRanges;
- mockRanges.push_back(new MockRange(endpoint, nss, BSON("x" << MINKEY), BSON("x" << MAXKEY)));
-
BatchedCommandRequest request([&] {
write_ops::Insert insertOp(nss);
insertOp.setDocuments({BSON("x" << 1)});
@@ -108,7 +104,7 @@ TEST(WriteOpTests, TargetSingle) {
ASSERT_EQUALS(writeOp.getWriteState(), WriteOpState_Ready);
MockNSTargeter targeter;
- targeter.init(mockRanges);
+ targeter.init(nss, {MockRange(endpoint, BSON("x" << MINKEY), BSON("x" << MAXKEY))});
OwnedPointerVector<TargetedWrite> targetedOwned;
std::vector<TargetedWrite*>& targeted = targetedOwned.mutableVector();
@@ -126,17 +122,12 @@ TEST(WriteOpTests, TargetSingle) {
// Multi-write targeting test where our query goes to one shard
TEST(WriteOpTests, TargetMultiOneShard) {
OperationContextNoop opCtx;
- NamespaceString nss("foo.bar");
+ NamespaceString nss("foo.bar");
ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion(10, 0, OID()));
ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion(20, 0, OID()));
ShardEndpoint endpointC(ShardId("shardB"), ChunkVersion(20, 0, OID()));
- std::vector<MockRange*> mockRanges;
- mockRanges.push_back(new MockRange(endpointA, nss, BSON("x" << MINKEY), BSON("x" << 0)));
- mockRanges.push_back(new MockRange(endpointB, nss, BSON("x" << 0), BSON("x" << 10)));
- mockRanges.push_back(new MockRange(endpointC, nss, BSON("x" << 10), BSON("x" << MAXKEY)));
-
BatchedCommandRequest request([&] {
write_ops::Delete deleteOp(nss);
// Only hits first shard
@@ -148,7 +139,10 @@ TEST(WriteOpTests, TargetMultiOneShard) {
ASSERT_EQUALS(writeOp.getWriteState(), WriteOpState_Ready);
MockNSTargeter targeter;
- targeter.init(mockRanges);
+ targeter.init(nss,
+ {MockRange(endpointA, BSON("x" << MINKEY), BSON("x" << 0)),
+ MockRange(endpointB, BSON("x" << 0), BSON("x" << 10)),
+ MockRange(endpointC, BSON("x" << 10), BSON("x" << MAXKEY))});
OwnedPointerVector<TargetedWrite> targetedOwned;
std::vector<TargetedWrite*>& targeted = targetedOwned.mutableVector();
@@ -167,17 +161,12 @@ TEST(WriteOpTests, TargetMultiOneShard) {
// Multi-write targeting test where our write goes to more than one shard
TEST(WriteOpTests, TargetMultiAllShards) {
OperationContextNoop opCtx;
- NamespaceString nss("foo.bar");
+ NamespaceString nss("foo.bar");
ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion(10, 0, OID()));
ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion(20, 0, OID()));
ShardEndpoint endpointC(ShardId("shardB"), ChunkVersion(20, 0, OID()));
- std::vector<MockRange*> mockRanges;
- mockRanges.push_back(new MockRange(endpointA, nss, BSON("x" << MINKEY), BSON("x" << 0)));
- mockRanges.push_back(new MockRange(endpointB, nss, BSON("x" << 0), BSON("x" << 10)));
- mockRanges.push_back(new MockRange(endpointC, nss, BSON("x" << 10), BSON("x" << MAXKEY)));
-
BatchedCommandRequest request([&] {
write_ops::Delete deleteOp(nss);
deleteOp.setDeletes({buildDelete(BSON("x" << GTE << -1 << LT << 1), false)});
@@ -189,7 +178,10 @@ TEST(WriteOpTests, TargetMultiAllShards) {
ASSERT_EQUALS(writeOp.getWriteState(), WriteOpState_Ready);
MockNSTargeter targeter;
- targeter.init(mockRanges);
+ targeter.init(nss,
+ {MockRange(endpointA, BSON("x" << MINKEY), BSON("x" << 0)),
+ MockRange(endpointB, BSON("x" << 0), BSON("x" << 10)),
+ MockRange(endpointC, BSON("x" << 10), BSON("x" << MAXKEY))});
OwnedPointerVector<TargetedWrite> targetedOwned;
std::vector<TargetedWrite*>& targeted = targetedOwned.mutableVector();
@@ -216,13 +208,10 @@ TEST(WriteOpTests, TargetMultiAllShards) {
// Single error after targeting test
TEST(WriteOpTests, ErrorSingle) {
OperationContextNoop opCtx;
- NamespaceString nss("foo.bar");
+ NamespaceString nss("foo.bar");
ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED());
- std::vector<MockRange*> mockRanges;
- mockRanges.push_back(new MockRange(endpoint, nss, BSON("x" << MINKEY), BSON("x" << MAXKEY)));
-
BatchedCommandRequest request([&] {
write_ops::Insert insertOp(nss);
insertOp.setDocuments({BSON("x" << 1)});
@@ -235,7 +224,7 @@ TEST(WriteOpTests, ErrorSingle) {
ASSERT_EQUALS(writeOp.getWriteState(), WriteOpState_Ready);
MockNSTargeter targeter;
- targeter.init(mockRanges);
+ targeter.init(nss, {MockRange(endpoint, BSON("x" << MINKEY), BSON("x" << MAXKEY))});
OwnedPointerVector<TargetedWrite> targetedOwned;
std::vector<TargetedWrite*>& targeted = targetedOwned.mutableVector();
@@ -260,13 +249,10 @@ TEST(WriteOpTests, ErrorSingle) {
// Cancel single targeting test
TEST(WriteOpTests, CancelSingle) {
OperationContextNoop opCtx;
- NamespaceString nss("foo.bar");
+ NamespaceString nss("foo.bar");
ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED());
- std::vector<MockRange*> mockRanges;
- mockRanges.push_back(new MockRange(endpoint, nss, BSON("x" << MINKEY), BSON("x" << MAXKEY)));
-
BatchedCommandRequest request([&] {
write_ops::Insert insertOp(nss);
insertOp.setDocuments({BSON("x" << 1)});
@@ -279,7 +265,7 @@ TEST(WriteOpTests, CancelSingle) {
ASSERT_EQUALS(writeOp.getWriteState(), WriteOpState_Ready);
MockNSTargeter targeter;
- targeter.init(mockRanges);
+ targeter.init(nss, {MockRange(endpoint, BSON("x" << MINKEY), BSON("x" << MAXKEY))});
OwnedPointerVector<TargetedWrite> targetedOwned;
std::vector<TargetedWrite*>& targeted = targetedOwned.mutableVector();
@@ -302,13 +288,10 @@ TEST(WriteOpTests, CancelSingle) {
// Retry single targeting test
TEST(WriteOpTests, RetrySingleOp) {
OperationContextNoop opCtx;
- NamespaceString nss("foo.bar");
+ NamespaceString nss("foo.bar");
ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED());
- std::vector<MockRange*> mockRanges;
- mockRanges.push_back(new MockRange(endpoint, nss, BSON("x" << MINKEY), BSON("x" << MAXKEY)));
-
BatchedCommandRequest request([&] {
write_ops::Insert insertOp(nss);
insertOp.setDocuments({BSON("x" << 1)});
@@ -321,7 +304,7 @@ TEST(WriteOpTests, RetrySingleOp) {
ASSERT_EQUALS(writeOp.getWriteState(), WriteOpState_Ready);
MockNSTargeter targeter;
- targeter.init(mockRanges);
+ targeter.init(nss, {MockRange(endpoint, BSON("x" << MINKEY), BSON("x" << MAXKEY))});
OwnedPointerVector<TargetedWrite> targetedOwned;
std::vector<TargetedWrite*>& targeted = targetedOwned.mutableVector();