author     Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2015-08-13 14:15:08 -0400
committer  Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2015-08-14 15:27:43 -0400
commit     914c12f4c7e850cacc6292bc330101579a0627ef (patch)
tree       54b3e59b4a066e3b3acf4b1bc44998cebf7f6fe5
parent     c8d6784595dc2ea4d6ec1bc901578f15e9c6b076 (diff)
download   mongo-914c12f4c7e850cacc6292bc330101579a0627ef.tar.gz
SERVER-19855 Make write_ops have their own SConscript
Also fixes some includes and dependencies.
-rw-r--r--  src/mongo/db/SConscript  2
-rw-r--r--  src/mongo/db/commands/write_commands/batch_executor.cpp  158
-rw-r--r--  src/mongo/db/s/SConscript  1
-rw-r--r--  src/mongo/db/s/collection_metadata_test.cpp  6
-rw-r--r--  src/mongo/db/s/merge_chunks_command.cpp (renamed from src/mongo/db/commands/merge_chunks_cmd.cpp)  0
-rw-r--r--  src/mongo/db/s/sharding_state.cpp  9
-rw-r--r--  src/mongo/dbtests/SConscript  1
-rw-r--r--  src/mongo/s/SConscript  119
-rw-r--r--  src/mongo/s/catalog/SConscript  2
-rw-r--r--  src/mongo/s/catalog/type_chunk_test.cpp  8
-rw-r--r--  src/mongo/s/chunk_diff_test.cpp  2
-rw-r--r--  src/mongo/s/chunk_manager_targeter.cpp  9
-rw-r--r--  src/mongo/s/ns_targeter.h  2
-rw-r--r--  src/mongo/s/write_ops/SConscript  92
-rw-r--r--  src/mongo/s/write_ops/batch_downconvert.cpp  3
-rw-r--r--  src/mongo/s/write_ops/batch_downconvert.h  3
-rw-r--r--  src/mongo/s/write_ops/batch_upconvert.cpp  35
-rw-r--r--  src/mongo/s/write_ops/batch_upconvert.h  8
-rw-r--r--  src/mongo/s/write_ops/batch_write_exec.cpp  18
-rw-r--r--  src/mongo/s/write_ops/batch_write_exec_test.cpp  11
-rw-r--r--  src/mongo/s/write_ops/batch_write_op.cpp  2
-rw-r--r--  src/mongo/s/write_ops/batch_write_op_test.cpp  48
-rw-r--r--  src/mongo/s/write_ops/batched_command_request.cpp  4
-rw-r--r--  src/mongo/s/write_ops/batched_command_request.h  2
-rw-r--r--  src/mongo/s/write_ops/batched_command_request_test.cpp  8
-rw-r--r--  src/mongo/s/write_ops/batched_command_response_test.cpp  35
-rw-r--r--  src/mongo/s/write_ops/batched_delete_request.cpp  4
-rw-r--r--  src/mongo/s/write_ops/batched_delete_request.h  1
-rw-r--r--  src/mongo/s/write_ops/batched_delete_request_test.cpp  35
-rw-r--r--  src/mongo/s/write_ops/batched_insert_request.cpp  4
-rw-r--r--  src/mongo/s/write_ops/batched_insert_request.h  1
-rw-r--r--  src/mongo/s/write_ops/batched_insert_request_test.cpp  38
-rw-r--r--  src/mongo/s/write_ops/batched_request_metadata_test.cpp  37
-rw-r--r--  src/mongo/s/write_ops/batched_update_request.cpp  4
-rw-r--r--  src/mongo/s/write_ops/batched_update_request.h  1
-rw-r--r--  src/mongo/s/write_ops/batched_update_request_test.cpp  36
-rw-r--r--  src/mongo/s/write_ops/write_op.cpp  2
-rw-r--r--  src/mongo/s/write_ops/write_op_test.cpp  13
38 files changed, 358 insertions, 406 deletions
diff --git a/src/mongo/db/SConscript b/src/mongo/db/SConscript
index 9a294ece678..7c776eb8baf 100644
--- a/src/mongo/db/SConscript
+++ b/src/mongo/db/SConscript
@@ -533,7 +533,6 @@ serverOnlyFiles = [
"commands/list_collections.cpp",
"commands/list_databases.cpp",
"commands/list_indexes.cpp",
- "commands/merge_chunks_cmd.cpp",
"commands/mr.cpp",
"commands/oplog_note.cpp",
"commands/parallel_collection_scan.cpp",
@@ -624,7 +623,6 @@ serveronlyEnv.InjectThirdPartyIncludePaths(libraries=['snappy'])
serveronlyLibdeps = [
"$BUILD_DIR/mongo/client/parallel",
"$BUILD_DIR/mongo/executor/network_interface_factory",
- "$BUILD_DIR/mongo/s/batch_write_types",
"$BUILD_DIR/mongo/s/catalog/legacy/catalog_manager_legacy",
"$BUILD_DIR/mongo/s/catalog/replset/catalog_manager_replica_set",
"$BUILD_DIR/mongo/s/client/sharding_connection_hook",
diff --git a/src/mongo/db/commands/write_commands/batch_executor.cpp b/src/mongo/db/commands/write_commands/batch_executor.cpp
index bfa4b0f6623..bbfe3e4a1d9 100644
--- a/src/mongo/db/commands/write_commands/batch_executor.cpp
+++ b/src/mongo/db/commands/write_commands/batch_executor.cpp
@@ -51,7 +51,6 @@
#include "mongo/db/lasterror.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/op_observer.h"
-#include "mongo/db/operation_context_impl.h"
#include "mongo/db/ops/delete_request.h"
#include "mongo/db/ops/insert.h"
#include "mongo/db/ops/parsed_delete.h"
@@ -83,10 +82,10 @@
namespace mongo {
-using std::endl;
using std::string;
using std::unique_ptr;
using std::vector;
+using str::stream;
namespace {
@@ -118,18 +117,7 @@ private:
std::unique_ptr<WriteErrorDetail> _error;
};
-} // namespace
-
-// TODO: Determine queueing behavior we want here
-MONGO_EXPORT_SERVER_PARAMETER(queueForMigrationCommit, bool, true);
-
-using mongoutils::str::stream;
-
-WriteBatchExecutor::WriteBatchExecutor(OperationContext* txn, OpCounters* opCounters, LastError* le)
- : _txn(txn), _opCounters(opCounters), _le(le), _stats(new WriteBatchStats) {}
-
-static WCErrorDetail* toWriteConcernError(const Status& wcStatus,
- const WriteConcernResult& wcResult) {
+WCErrorDetail* toWriteConcernError(const Status& wcStatus, const WriteConcernResult& wcResult) {
WCErrorDetail* wcError = new WCErrorDetail;
wcError->setErrCode(wcStatus.code());
@@ -140,7 +128,7 @@ static WCErrorDetail* toWriteConcernError(const Status& wcStatus,
return wcError;
}
-static WriteErrorDetail* toWriteError(const Status& status) {
+WriteErrorDetail* toWriteError(const Status& status) {
WriteErrorDetail* error = new WriteErrorDetail;
// TODO: Complex transform here?
@@ -150,7 +138,7 @@ static WriteErrorDetail* toWriteError(const Status& status) {
return error;
}
-static void toBatchError(const Status& status, BatchedCommandResponse* response) {
+void toBatchError(const Status& status, BatchedCommandResponse* response) {
response->clear();
response->setErrCode(status.code());
response->setErrMessage(status.reason());
@@ -158,7 +146,7 @@ static void toBatchError(const Status& status, BatchedCommandResponse* response)
dassert(response->isValid(NULL));
}
-static void noteInCriticalSection(WriteErrorDetail* staleError) {
+void noteInCriticalSection(WriteErrorDetail* staleError) {
BSONObjBuilder builder;
if (staleError->isErrInfoSet())
builder.appendElements(staleError->getErrInfo());
@@ -166,6 +154,77 @@ static void noteInCriticalSection(WriteErrorDetail* staleError) {
staleError->setErrInfo(builder.obj());
}
+/**
+ * Translates write item type to wire protocol op code. Helper for
+ * WriteBatchExecutor::applyWriteItem().
+ */
+int getOpCode(const BatchItemRef& currWrite) {
+ switch (currWrite.getRequest()->getBatchType()) {
+ case BatchedCommandRequest::BatchType_Insert:
+ return dbInsert;
+ case BatchedCommandRequest::BatchType_Update:
+ return dbUpdate;
+ case BatchedCommandRequest::BatchType_Delete:
+ return dbDelete;
+ default:
+ MONGO_UNREACHABLE;
+ }
+}
+
+void buildStaleError(const ChunkVersion& shardVersionRecvd,
+ const ChunkVersion& shardVersionWanted,
+ WriteErrorDetail* error) {
+ // Write stale error to results
+ error->setErrCode(ErrorCodes::StaleShardVersion);
+
+ BSONObjBuilder infoB;
+ shardVersionWanted.addToBSON(infoB, "vWanted");
+ error->setErrInfo(infoB.obj());
+
+ string errMsg = stream() << "stale shard version detected before write, received "
+ << shardVersionRecvd.toString() << " but local version is "
+ << shardVersionWanted.toString();
+ error->setErrMessage(errMsg);
+}
+
+bool checkShardVersion(OperationContext* txn,
+ const BatchedCommandRequest& request,
+ WriteOpResult* result) {
+ const NamespaceString& nss = request.getTargetingNSS();
+ dassert(txn->lockState()->isCollectionLockedForMode(nss.ns(), MODE_IX));
+
+ ChunkVersion requestShardVersion =
+ request.isMetadataSet() && request.getMetadata()->isShardVersionSet()
+ ? request.getMetadata()->getShardVersion()
+ : ChunkVersion::IGNORED();
+
+ ShardingState* shardingState = ShardingState::get(txn);
+ if (shardingState->enabled()) {
+ CollectionMetadataPtr metadata = shardingState->getCollectionMetadata(nss.ns());
+
+ if (!ChunkVersion::isIgnoredVersion(requestShardVersion)) {
+ ChunkVersion shardVersion =
+ metadata ? metadata->getShardVersion() : ChunkVersion::UNSHARDED();
+
+ if (!requestShardVersion.isWriteCompatibleWith(shardVersion)) {
+ result->setError(new WriteErrorDetail);
+ buildStaleError(requestShardVersion, shardVersion, result->getError());
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+} // namespace
+
+// TODO: Determine queueing behavior we want here
+MONGO_EXPORT_SERVER_PARAMETER(queueForMigrationCommit, bool, true);
+
+WriteBatchExecutor::WriteBatchExecutor(OperationContext* txn, OpCounters* opCounters, LastError* le)
+ : _txn(txn), _opCounters(opCounters), _le(le), _stats(new WriteBatchStats) {}
+
// static
Status WriteBatchExecutor::validateBatch(const BatchedCommandRequest& request) {
// Validate namespace
@@ -314,7 +373,7 @@ void WriteBatchExecutor::executeBatch(const BatchedCommandRequest& request,
while (shardingState->inCriticalMigrateSection()) {
log() << "write request to old shard version "
<< requestMetadata->getShardVersion().toString()
- << " waiting for migration commit" << endl;
+ << " waiting for migration commit";
shardingState->waitTillNotInCriticalSection(10 /* secs */);
}
@@ -361,67 +420,6 @@ void WriteBatchExecutor::executeBatch(const BatchedCommandRequest& request,
dassert(response->isValid(NULL));
}
-// Translates write item type to wire protocol op code.
-// Helper for WriteBatchExecutor::applyWriteItem().
-static int getOpCode(const BatchItemRef& currWrite) {
- switch (currWrite.getRequest()->getBatchType()) {
- case BatchedCommandRequest::BatchType_Insert:
- return dbInsert;
- case BatchedCommandRequest::BatchType_Update:
- return dbUpdate;
- case BatchedCommandRequest::BatchType_Delete:
- return dbDelete;
- default:
- MONGO_UNREACHABLE;
- }
-}
-
-static void buildStaleError(const ChunkVersion& shardVersionRecvd,
- const ChunkVersion& shardVersionWanted,
- WriteErrorDetail* error) {
- // Write stale error to results
- error->setErrCode(ErrorCodes::StaleShardVersion);
-
- BSONObjBuilder infoB;
- shardVersionWanted.addToBSON(infoB, "vWanted");
- error->setErrInfo(infoB.obj());
-
- string errMsg = stream() << "stale shard version detected before write, received "
- << shardVersionRecvd.toString() << " but local version is "
- << shardVersionWanted.toString();
- error->setErrMessage(errMsg);
-}
-
-static bool checkShardVersion(OperationContext* txn,
- const BatchedCommandRequest& request,
- WriteOpResult* result) {
- const NamespaceString& nss = request.getTargetingNSS();
- dassert(txn->lockState()->isCollectionLockedForMode(nss.ns(), MODE_IX));
-
- ChunkVersion requestShardVersion =
- request.isMetadataSet() && request.getMetadata()->isShardVersionSet()
- ? request.getMetadata()->getShardVersion()
- : ChunkVersion::IGNORED();
-
- ShardingState* shardingState = ShardingState::get(txn);
- if (shardingState->enabled()) {
- CollectionMetadataPtr metadata = shardingState->getCollectionMetadata(nss.ns());
-
- if (!ChunkVersion::isIgnoredVersion(requestShardVersion)) {
- ChunkVersion shardVersion =
- metadata ? metadata->getShardVersion() : ChunkVersion::UNSHARDED();
-
- if (!requestShardVersion.isWriteCompatibleWith(shardVersion)) {
- result->setError(new WriteErrorDetail);
- buildStaleError(requestShardVersion, shardVersion, result->getError());
- return false;
- }
- }
- }
-
- return true;
-}
-
static bool checkIsMasterForDatabase(const NamespaceString& ns, WriteOpResult* result) {
if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(ns)) {
WriteErrorDetail* errorDetail = new WriteErrorDetail;
@@ -563,7 +561,7 @@ static void finishCurrentOp(OperationContext* txn, WriteErrorDetail* opError) {
ExceptionInfo(opError->getErrMessage(), opError->getErrCode());
LOG(3) << " Caught Assertion in " << opToString(currentOp->getOp()) << ", continuing "
- << causedBy(opError->getErrMessage()) << endl;
+ << causedBy(opError->getErrMessage());
}
bool logAll = logger::globalLogDomain()->shouldLog(logger::LogComponent::kWrite,
diff --git a/src/mongo/db/s/SConscript b/src/mongo/db/s/SConscript
index 5d6c7e7884d..a5f924738c5 100644
--- a/src/mongo/db/s/SConscript
+++ b/src/mongo/db/s/SConscript
@@ -38,6 +38,7 @@ env.Library(
env.Library(
target='commands',
source=[
+ 'merge_chunks_command.cpp',
'set_shard_version_command.cpp',
],
LIBDEPS=[
diff --git a/src/mongo/db/s/collection_metadata_test.cpp b/src/mongo/db/s/collection_metadata_test.cpp
index 3df8e8b82f7..607002edfe6 100644
--- a/src/mongo/db/s/collection_metadata_test.cpp
+++ b/src/mongo/db/s/collection_metadata_test.cpp
@@ -617,8 +617,7 @@ TEST_F(SingleChunkFixture, MinusChunkWithPending) {
}
TEST_F(SingleChunkFixture, SingleSplit) {
- ChunkVersion version;
- getCollMetadata().getCollVersion().cloneTo(&version);
+ ChunkVersion version = getCollMetadata().getCollVersion();
version.incMinor();
ChunkType chunk;
@@ -662,8 +661,7 @@ TEST_F(SingleChunkFixture, MultiSplit) {
splitPoints.push_back(BSON("a" << 14));
splitPoints.push_back(BSON("a" << 16));
- ChunkVersion version;
- getCollMetadata().getCollVersion().cloneTo(&version);
+ ChunkVersion version = getCollMetadata().getCollVersion();
version.incMinor();
cloned.reset(getCollMetadata().cloneSplit(chunk, splitPoints, version, &errMsg));
diff --git a/src/mongo/db/commands/merge_chunks_cmd.cpp b/src/mongo/db/s/merge_chunks_command.cpp
index 58fc9c71ac3..58fc9c71ac3 100644
--- a/src/mongo/db/commands/merge_chunks_cmd.cpp
+++ b/src/mongo/db/s/merge_chunks_command.cpp
diff --git a/src/mongo/db/s/sharding_state.cpp b/src/mongo/db/s/sharding_state.cpp
index 0214c2829ee..8865a839e5c 100644
--- a/src/mongo/db/s/sharding_state.cpp
+++ b/src/mongo/db/s/sharding_state.cpp
@@ -422,6 +422,7 @@ Status ShardingState::refreshMetadataIfNeeded(OperationContext* txn,
if (it != _collMetadata.end())
storedMetadata = it->second;
}
+
ChunkVersion storedShardVersion;
if (storedMetadata)
storedShardVersion = storedMetadata->getShardVersion();
@@ -542,8 +543,7 @@ Status ShardingState::doRefreshMetadata(OperationContext* txn,
string errMsg;
MetadataLoader mdLoader;
- CollectionMetadata* remoteMetadataRaw = new CollectionMetadata();
- shared_ptr<CollectionMetadata> remoteMetadata(remoteMetadataRaw);
+ shared_ptr<CollectionMetadata> remoteMetadata(std::make_shared<CollectionMetadata>());
Timer refreshTimer;
long long refreshMillis;
@@ -554,12 +554,11 @@ Status ShardingState::doRefreshMetadata(OperationContext* txn,
ns,
getShardName(),
fullReload ? NULL : beforeMetadata.get(),
- remoteMetadataRaw);
+ remoteMetadata.get());
refreshMillis = refreshTimer.millis();
if (status.code() == ErrorCodes::NamespaceNotFound) {
remoteMetadata.reset();
- remoteMetadataRaw = NULL;
} else if (!status.isOK()) {
warning() << "could not remotely refresh metadata for " << ns
<< causedBy(status.reason());
@@ -630,7 +629,7 @@ Status ShardingState::doRefreshMetadata(OperationContext* txn,
// Resolve newer pending chunks with the remote metadata, finish construction
//
- Status status = mdLoader.promotePendingChunks(afterMetadata.get(), remoteMetadataRaw);
+ Status status = mdLoader.promotePendingChunks(afterMetadata.get(), remoteMetadata.get());
if (!status.isOK()) {
warning() << "remote metadata for " << ns
diff --git a/src/mongo/dbtests/SConscript b/src/mongo/dbtests/SConscript
index 927a2936baa..7a4f69d78c0 100644
--- a/src/mongo/dbtests/SConscript
+++ b/src/mongo/dbtests/SConscript
@@ -109,7 +109,6 @@ dbtest = env.Program(
"$BUILD_DIR/mongo/db/repl/replmocks",
"$BUILD_DIR/mongo/bson/mutable/mutable_bson_test_utils",
"$BUILD_DIR/mongo/platform/platform",
- "$BUILD_DIR/mongo/s/cluster_ops",
"$BUILD_DIR/mongo/db/serveronly",
"$BUILD_DIR/mongo/util/concurrency/rwlock",
"mocklib",
diff --git a/src/mongo/s/SConscript b/src/mongo/s/SConscript
index 676f1fe21ad..df9b3cdef5d 100644
--- a/src/mongo/s/SConscript
+++ b/src/mongo/s/SConscript
@@ -8,6 +8,7 @@ env.SConscript(
'client',
'commands',
'query',
+ 'write_ops',
],
)
@@ -81,114 +82,6 @@ env.CppUnitTest(
)
#
-# Write Operations
-#
-
-# Types for write operations
-# TODO: Push upward into shared types library between mongos/mongod
-env.Library(
- target='batch_write_types',
- source=[
- 'write_ops/batched_command_request.cpp',
- 'write_ops/batched_command_response.cpp',
- 'write_ops/batched_delete_request.cpp',
- 'write_ops/batched_delete_document.cpp',
- 'write_ops/batched_insert_request.cpp',
- 'write_ops/batched_request_metadata.cpp',
- 'write_ops/batched_update_request.cpp',
- 'write_ops/batched_update_document.cpp',
- 'write_ops/batched_upsert_detail.cpp',
- 'write_ops/wc_error_detail.cpp',
- 'write_ops/write_error_detail.cpp',
- ],
- LIBDEPS=[
- '$BUILD_DIR/mongo/base',
- '$BUILD_DIR/mongo/db/common',
- ],
-)
-
-env.CppUnitTest(
- target='batch_write_types_test',
- source=[
- 'write_ops/batched_command_request_test.cpp',
- 'write_ops/batched_command_response_test.cpp',
- 'write_ops/batched_delete_request_test.cpp',
- 'write_ops/batched_insert_request_test.cpp',
- 'write_ops/batched_request_metadata_test.cpp',
- 'write_ops/batched_update_request_test.cpp',
- ],
- LIBDEPS=[
- 'batch_write_types',
- '$BUILD_DIR/mongo/db/common',
- ]
-)
-
-#
-# State and execution of operations across multiple hosts
-#
-# This functionality is self-contained and independent of any network or system-level
-# code.
-#
-env.Library(
- target='cluster_ops',
- source=[
- 'write_ops/write_op.cpp',
- 'write_ops/batch_write_op.cpp',
- 'write_ops/batch_write_exec.cpp',
- ],
- LIBDEPS=[
- '$BUILD_DIR/mongo/base',
- '$BUILD_DIR/mongo/client/clientdriver',
- 'batch_write_types',
- '$BUILD_DIR/mongo/util/concurrency/synchronization'
- ],
-)
-
-env.CppUnitTest(
- target='cluster_ops_test',
- source=[
- 'write_ops/write_op_test.cpp',
- 'write_ops/batch_write_op_test.cpp',
- 'write_ops/batch_write_exec_test.cpp',
- ],
- LIBDEPS=[
- 'cluster_ops',
- '$BUILD_DIR/mongo/db/common',
- '$BUILD_DIR/mongo/db/range_arithmetic',
- '$BUILD_DIR/mongo/db/service_context',
- ]
-)
-
-# Upconvert/Downconvert write functionality for mongos
-env.Library(
- target='cluster_write_op_conversion',
- source=[
- 'write_ops/batch_upconvert.cpp',
- 'write_ops/batch_downconvert.cpp',
- ],
- LIBDEPS=[
- '$BUILD_DIR/mongo/base',
- '$BUILD_DIR/mongo/db/common', # for Message
- '$BUILD_DIR/mongo/db/lasterror',
- 'cluster_ops',
- ],
-)
-
-env.CppUnitTest(
- target='cluster_write_op_conversion_test',
- source=[
- 'write_ops/batch_upconvert_test.cpp',
- 'write_ops/batch_downconvert_test.cpp',
- ],
- LIBDEPS=[
- 'cluster_ops',
- 'cluster_write_op_conversion',
- '$BUILD_DIR/mongo/db/common',
- '$BUILD_DIR/mongo/client/clientdriver',
- ]
-)
-
-#
# Implementations of components to perform cluster operations in mongos
#
# This is the glue code implementing the interfaces required by cluster ops
@@ -203,10 +96,10 @@ env.Library(
'dbclient_shard_resolver.cpp',
],
LIBDEPS=[
- '$BUILD_DIR/mongo/base',
- 'cluster_ops',
- 'cluster_write_op_conversion',
'client/sharding_client',
+ 'write_ops/cluster_write_op',
+ 'write_ops/cluster_write_op_conversion',
+ '$BUILD_DIR/mongo/base',
],
)
@@ -268,8 +161,8 @@ env.Library(
'$BUILD_DIR/mongo/db/stats/counters',
'$BUILD_DIR/mongo/s/query/cluster_query',
'$BUILD_DIR/mongo/util/concurrency/task',
- 'cluster_ops',
- 'cluster_write_op_conversion',
+ 'write_ops/cluster_write_op',
+ 'write_ops/cluster_write_op_conversion',
]
)
diff --git a/src/mongo/s/catalog/SConscript b/src/mongo/s/catalog/SConscript
index a0ed81168d8..f86637b1ff3 100644
--- a/src/mongo/s/catalog/SConscript
+++ b/src/mongo/s/catalog/SConscript
@@ -52,10 +52,10 @@ env.Library(
LIBDEPS=[
'$BUILD_DIR/mongo/base',
'$BUILD_DIR/mongo/db/common',
- '$BUILD_DIR/mongo/s/batch_write_types',
'$BUILD_DIR/mongo/s/catalog/catalog_types',
'$BUILD_DIR/mongo/s/client/sharding_client',
'$BUILD_DIR/mongo/s/shard_util',
+ '$BUILD_DIR/mongo/s/write_ops/batch_write_types',
]
)
diff --git a/src/mongo/s/catalog/type_chunk_test.cpp b/src/mongo/s/catalog/type_chunk_test.cpp
index bc22f23ddb1..5bfab7a31af 100644
--- a/src/mongo/s/catalog/type_chunk_test.cpp
+++ b/src/mongo/s/catalog/type_chunk_test.cpp
@@ -124,8 +124,8 @@ TEST(ChunkType, NewFormatVersion) {
ASSERT_EQUALS(chunk.getMin(), BSON("a" << 10));
ASSERT_EQUALS(chunk.getMax(), BSON("a" << 20));
ChunkVersion fetchedVersion = chunk.getVersion();
- ASSERT_EQUALS(fetchedVersion._combined, 1ULL);
- ASSERT_EQUALS(fetchedVersion._epoch, epoch);
+ ASSERT_EQUALS(fetchedVersion.toLong(), 1ULL);
+ ASSERT_EQUALS(fetchedVersion.epoch(), epoch);
ASSERT_EQUALS(chunk.getShard(), "shard0001");
ASSERT_TRUE(chunk.validate().isOK());
}
@@ -147,8 +147,8 @@ TEST(ChunkType, OldFormatVersion) {
ASSERT_EQUALS(chunk.getMin(), BSON("a" << 10));
ASSERT_EQUALS(chunk.getMax(), BSON("a" << 20));
ChunkVersion fetchedVersion = chunk.getVersion();
- ASSERT_EQUALS(fetchedVersion._combined, 1ULL);
- ASSERT_EQUALS(fetchedVersion._epoch, epoch);
+ ASSERT_EQUALS(fetchedVersion.toLong(), 1ULL);
+ ASSERT_EQUALS(fetchedVersion.epoch(), epoch);
ASSERT_EQUALS(chunk.getShard(), "shard0001");
ASSERT_TRUE(chunk.validate().isOK());
}
diff --git a/src/mongo/s/chunk_diff_test.cpp b/src/mongo/s/chunk_diff_test.cpp
index f7c924eb901..76df3e999a6 100644
--- a/src/mongo/s/chunk_diff_test.cpp
+++ b/src/mongo/s/chunk_diff_test.cpp
@@ -241,7 +241,6 @@ protected:
rightB.append(chunk[ChunkType::shard()]);
version.incMajor();
- version._minor = 0;
version.addToBSON(leftB, ChunkType::DEPRECATED_lastmod());
version.incMinor();
version.addToBSON(rightB, ChunkType::DEPRECATED_lastmod());
@@ -294,7 +293,6 @@ protected:
prevShardB.append(prevShardChunk[ChunkType::shard()]);
version.incMajor();
- version._minor = 0;
version.addToBSON(newShardB, ChunkType::DEPRECATED_lastmod());
version.incMinor();
version.addToBSON(prevShardB, ChunkType::DEPRECATED_lastmod());
diff --git a/src/mongo/s/chunk_manager_targeter.cpp b/src/mongo/s/chunk_manager_targeter.cpp
index ee98e9a8481..f4448dafe5d 100644
--- a/src/mongo/s/chunk_manager_targeter.cpp
+++ b/src/mongo/s/chunk_manager_targeter.cpp
@@ -582,13 +582,12 @@ void ChunkManagerTargeter::noteStaleResponse(const ShardEndpoint& endpoint,
ChunkVersion& previouslyNotedVersion = it->second;
if (previouslyNotedVersion.hasEqualEpoch(remoteShardVersion)) {
if (previouslyNotedVersion.isOlderThan(remoteShardVersion)) {
- remoteShardVersion.cloneTo(&previouslyNotedVersion);
+ previouslyNotedVersion = remoteShardVersion;
}
} else {
- // Epoch changed midway while applying the batch so set the version to
- // something unique and non-existent to force a reload when
- // refreshIsNeeded is called.
- ChunkVersion::IGNORED().cloneTo(&previouslyNotedVersion);
+ // Epoch changed midway while applying the batch so set the version to something unique
+ // and non-existent to force a reload when refreshIsNeeded is called.
+ previouslyNotedVersion = ChunkVersion::IGNORED();
}
}
}
diff --git a/src/mongo/s/ns_targeter.h b/src/mongo/s/ns_targeter.h
index 7aed94536ea..087725697d2 100644
--- a/src/mongo/s/ns_targeter.h
+++ b/src/mongo/s/ns_targeter.h
@@ -154,8 +154,6 @@ public:
* the logical target (shard name/version/broadcast) and the physical target (host name).
*/
struct ShardEndpoint {
- ShardEndpoint() {}
-
ShardEndpoint(const ShardEndpoint& other)
: shardName(other.shardName), shardVersion(other.shardVersion) {}
diff --git a/src/mongo/s/write_ops/SConscript b/src/mongo/s/write_ops/SConscript
new file mode 100644
index 00000000000..f8301f09e2c
--- /dev/null
+++ b/src/mongo/s/write_ops/SConscript
@@ -0,0 +1,92 @@
+# -*- mode: python -*-
+
+Import("env")
+
+env.Library(
+ target='batch_write_types',
+ source=[
+ 'batched_command_request.cpp',
+ 'batched_command_response.cpp',
+ 'batched_delete_request.cpp',
+ 'batched_delete_document.cpp',
+ 'batched_insert_request.cpp',
+ 'batched_request_metadata.cpp',
+ 'batched_update_request.cpp',
+ 'batched_update_document.cpp',
+ 'batched_upsert_detail.cpp',
+ 'wc_error_detail.cpp',
+ 'write_error_detail.cpp',
+ ],
+ LIBDEPS=[
+ '$BUILD_DIR/mongo/base',
+ '$BUILD_DIR/mongo/db/common',
+ '$BUILD_DIR/mongo/s/common',
+ ],
+)
+
+env.Library(
+ target='cluster_write_op',
+ source=[
+ 'write_op.cpp',
+ 'batch_write_op.cpp',
+ 'batch_write_exec.cpp',
+ ],
+ LIBDEPS=[
+ 'batch_write_types',
+ '$BUILD_DIR/mongo/client/connection_string',
+ ],
+)
+
+env.Library(
+ target='cluster_write_op_conversion',
+ source=[
+ 'batch_upconvert.cpp',
+ 'batch_downconvert.cpp',
+ ],
+ LIBDEPS=[
+ 'cluster_write_op',
+ '$BUILD_DIR/mongo/db/dbmessage',
+ '$BUILD_DIR/mongo/db/lasterror',
+ ],
+)
+
+env.CppUnitTest(
+ target='batch_write_types_test',
+ source=[
+ 'batched_command_request_test.cpp',
+ 'batched_command_response_test.cpp',
+ 'batched_delete_request_test.cpp',
+ 'batched_insert_request_test.cpp',
+ 'batched_request_metadata_test.cpp',
+ 'batched_update_request_test.cpp',
+ ],
+ LIBDEPS=[
+ 'batch_write_types',
+ ]
+)
+
+env.CppUnitTest(
+ target='cluster_write_op_test',
+ source=[
+ 'write_op_test.cpp',
+ 'batch_write_op_test.cpp',
+ 'batch_write_exec_test.cpp',
+ ],
+ LIBDEPS=[
+ 'cluster_write_op',
+ '$BUILD_DIR/mongo/db/range_arithmetic',
+ '$BUILD_DIR/mongo/db/service_context',
+ ]
+)
+
+env.CppUnitTest(
+ target='cluster_write_op_conversion_test',
+ source=[
+ 'batch_upconvert_test.cpp',
+ 'batch_downconvert_test.cpp',
+ ],
+ LIBDEPS=[
+ 'cluster_write_op',
+ 'cluster_write_op_conversion',
+ ]
+)
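
(For context, not part of the commit: after this move, downstream SConscript files reference the relocated write_ops targets by their $BUILD_DIR paths under src/mongo/s/write_ops, as the LIBDEPS edits above show, instead of the old top-level 's/' names. A minimal sketch of such a consumer declaration follows; the 'example_consumer' target and its source file are illustrative placeholders only.)

    # -*- mode: python -*-

    Import("env")

    env.Library(
        target='example_consumer',        # hypothetical consumer library
        source=[
            'example_consumer.cpp',       # illustrative source file
        ],
        LIBDEPS=[
            # Old spelling was '$BUILD_DIR/mongo/s/batch_write_types'
            '$BUILD_DIR/mongo/s/write_ops/batch_write_types',
            '$BUILD_DIR/mongo/s/write_ops/cluster_write_op',
        ],
    )
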
diff --git a/src/mongo/s/write_ops/batch_downconvert.cpp b/src/mongo/s/write_ops/batch_downconvert.cpp
index 1d76767abde..a10ccdce874 100644
--- a/src/mongo/s/write_ops/batch_downconvert.cpp
+++ b/src/mongo/s/write_ops/batch_downconvert.cpp
@@ -288,4 +288,5 @@ Status enforceLegacyWriteConcern(MultiCommandDispatch* dispatcher,
: ErrorCodes::MultipleErrorsOccurred,
builder.str());
}
-}
+
+} // namespace mongo
diff --git a/src/mongo/s/write_ops/batch_downconvert.h b/src/mongo/s/write_ops/batch_downconvert.h
index 659e59d3a9a..46607aef1c0 100644
--- a/src/mongo/s/write_ops/batch_downconvert.h
+++ b/src/mongo/s/write_ops/batch_downconvert.h
@@ -85,4 +85,5 @@ Status extractGLEErrors(const BSONObj& gleResponse, GLEErrors* errors);
* Given a GLE response, strips out all non-write-concern related information
*/
BSONObj stripNonWCInfo(const BSONObj& gleResponse);
-}
+
+} // namespace mongo
diff --git a/src/mongo/s/write_ops/batch_upconvert.cpp b/src/mongo/s/write_ops/batch_upconvert.cpp
index 90dbec0b16e..1b645c96c75 100644
--- a/src/mongo/s/write_ops/batch_upconvert.cpp
+++ b/src/mongo/s/write_ops/batch_upconvert.cpp
@@ -30,7 +30,6 @@
#include "mongo/s/write_ops/batch_upconvert.h"
-
#include "mongo/bson/bsonobj.h"
#include "mongo/client/dbclientinterface.h"
#include "mongo/db/dbmessage.h"
@@ -43,24 +42,14 @@
namespace mongo {
-using mongoutils::str::stream;
+using str::stream;
using std::string;
using std::unique_ptr;
using std::vector;
-void msgToBatchRequests(const Message& msg, vector<BatchedCommandRequest*>* requests) {
- int opType = msg.operation();
-
- if (opType == dbInsert) {
- msgToBatchInserts(msg, requests);
- } else if (opType == dbUpdate) {
- requests->push_back(msgToBatchUpdate(msg));
- } else {
- dassert(opType == dbDelete);
- requests->push_back(msgToBatchDelete(msg));
- }
-}
+namespace {
+// Batch inserts may get mapped to multiple batch requests, to avoid spilling MaxBSONObjSize
void msgToBatchInserts(const Message& insertMsg, vector<BatchedCommandRequest*>* insertRequests) {
// Parsing DbMessage throws
DbMessage dbMsg(insertMsg);
@@ -156,6 +145,21 @@ void buildErrorFromResponse(const BatchedCommandResponse& response, WriteErrorDe
error->setErrMessage(response.getErrMessage());
}
+} // namespace
+
+void msgToBatchRequests(const Message& msg, vector<BatchedCommandRequest*>* requests) {
+ int opType = msg.operation();
+
+ if (opType == dbInsert) {
+ msgToBatchInserts(msg, requests);
+ } else if (opType == dbUpdate) {
+ requests->push_back(msgToBatchUpdate(msg));
+ } else {
+ dassert(opType == dbDelete);
+ requests->push_back(msgToBatchDelete(msg));
+ }
+}
+
bool batchErrorToLastError(const BatchedCommandRequest& request,
const BatchedCommandResponse& response,
LastError* error) {
@@ -222,4 +226,5 @@ bool batchErrorToLastError(const BatchedCommandRequest& request,
return false;
}
-}
+
+} // namespace mongo
diff --git a/src/mongo/s/write_ops/batch_upconvert.h b/src/mongo/s/write_ops/batch_upconvert.h
index a0b4712cf96..d54dfbb93d2 100644
--- a/src/mongo/s/write_ops/batch_upconvert.h
+++ b/src/mongo/s/write_ops/batch_upconvert.h
@@ -44,14 +44,6 @@ namespace mongo {
void msgToBatchRequests(const Message& msg, std::vector<BatchedCommandRequest*>* requests);
-// Batch inserts may get mapped to multiple batch requests, to avoid spilling MaxBSONObjSize
-void msgToBatchInserts(const Message& insertMsg,
- std::vector<BatchedCommandRequest*>* insertRequests);
-
-BatchedCommandRequest* msgToBatchUpdate(const Message& updateMsg);
-
-BatchedCommandRequest* msgToBatchDelete(const Message& deleteMsg);
-
/**
* Utility function for recording completed batch writes into the LastError object.
* (Interpreting the response requires the request object as well.)
diff --git a/src/mongo/s/write_ops/batch_write_exec.cpp b/src/mongo/s/write_ops/batch_write_exec.cpp
index d2876b4e362..9891b182e29 100644
--- a/src/mongo/s/write_ops/batch_write_exec.cpp
+++ b/src/mongo/s/write_ops/batch_write_exec.cpp
@@ -36,7 +36,7 @@
#include "mongo/base/owned_pointer_map.h"
#include "mongo/base/status.h"
#include "mongo/bson/util/builder.h"
-#include "mongo/client/dbclientinterface.h" // ConnectionString (header-only)
+#include "mongo/client/connection_string.h"
#include "mongo/s/client/multi_command_dispatch.h"
#include "mongo/s/write_ops/batch_write_op.h"
#include "mongo/s/write_ops/write_error_detail.h"
@@ -44,7 +44,6 @@
namespace mongo {
-using std::endl;
using std::make_pair;
using std::stringstream;
using std::vector;
@@ -97,8 +96,7 @@ void BatchWriteExec::executeBatch(OperationContext* txn,
const BatchedCommandRequest& clientRequest,
BatchedCommandResponse* clientResponse) {
LOG(4) << "starting execution of write batch of size "
- << static_cast<int>(clientRequest.sizeWriteOps()) << " for " << clientRequest.getNS()
- << endl;
+ << static_cast<int>(clientRequest.sizeWriteOps()) << " for " << clientRequest.getNS();
BatchWriteOp batchOp;
batchOp.initClientRequest(&clientRequest);
@@ -193,7 +191,7 @@ void BatchWriteExec::executeBatch(OperationContext* txn,
buildErrorFrom(resolveStatus, &error);
LOG(4) << "unable to send write batch to " << shardHost.toString()
- << causedBy(resolveStatus.toString()) << endl;
+ << causedBy(resolveStatus.toString());
batchOp.noteBatchError(*nextBatch, error);
@@ -223,7 +221,7 @@ void BatchWriteExec::executeBatch(OperationContext* txn,
request.setNS(nss);
LOG(4) << "sending write batch to " << shardHost.toString() << ": "
- << request.toString() << endl;
+ << request.toString();
_dispatcher->addCommand(shardHost, nss.db(), request.toBSON());
@@ -260,7 +258,7 @@ void BatchWriteExec::executeBatch(OperationContext* txn,
trackedErrors.startTracking(ErrorCodes::StaleShardVersion);
LOG(4) << "write results received from " << shardHost.toString() << ": "
- << response.toString() << endl;
+ << response.toString();
// Dispatch was ok, note response
batchOp.noteBatchResponse(*batch, response, &trackedErrors);
@@ -297,7 +295,7 @@ void BatchWriteExec::executeBatch(OperationContext* txn,
buildErrorFrom(Status(ErrorCodes::RemoteResultsUnavailable, msg.str()), &error);
LOG(4) << "unable to receive write results from " << shardHost.toString()
- << causedBy(dispatchStatus.toString()) << endl;
+ << causedBy(dispatchStatus.toString());
batchOp.noteBatchError(*batch, error);
}
@@ -323,7 +321,7 @@ void BatchWriteExec::executeBatch(OperationContext* txn,
if (!refreshStatus.isOK()) {
// It's okay if we can't refresh, we'll just record errors for the ops if
// needed.
- warning() << "could not refresh targeter" << causedBy(refreshStatus.reason()) << endl;
+ warning() << "could not refresh targeter" << causedBy(refreshStatus.reason());
}
//
@@ -359,7 +357,7 @@ void BatchWriteExec::executeBatch(OperationContext* txn,
? " and"
: "")
<< (clientResponse->isWriteConcernErrorSet() ? " with write concern error" : "")
- << " for " << clientRequest.getNS() << endl;
+ << " for " << clientRequest.getNS();
}
const BatchWriteExecStats& BatchWriteExec::getStats() {
diff --git a/src/mongo/s/write_ops/batch_write_exec_test.cpp b/src/mongo/s/write_ops/batch_write_exec_test.cpp
index f1e654c5a33..65658935818 100644
--- a/src/mongo/s/write_ops/batch_write_exec_test.cpp
+++ b/src/mongo/s/write_ops/batch_write_exec_test.cpp
@@ -26,25 +26,25 @@
* it in the license file.
*/
-#include "mongo/s/write_ops/batch_write_exec.h"
-
+#include "mongo/platform/basic.h"
#include "mongo/base/owned_pointer_vector.h"
#include "mongo/db/operation_context_noop.h"
#include "mongo/s/client/mock_multi_write_command.h"
#include "mongo/s/mock_ns_targeter.h"
#include "mongo/s/mock_shard_resolver.h"
+#include "mongo/s/write_ops/batch_write_exec.h"
#include "mongo/s/write_ops/batched_command_request.h"
#include "mongo/s/write_ops/batched_command_response.h"
#include "mongo/unittest/unittest.h"
-namespace {
+namespace mongo {
using std::unique_ptr;
using std::string;
using std::vector;
-using namespace mongo;
+namespace {
/**
* Mimics a single shard backend for a particular collection which can be initialized with a
@@ -308,4 +308,5 @@ TEST(BatchWriteExecTests, ManyStaleOpWithMigration) {
ASSERT_EQUALS(stats.numStaleBatches, 10);
}
-} // unnamed namespace
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/s/write_ops/batch_write_op.cpp b/src/mongo/s/write_ops/batch_write_op.cpp
index 57ea59c0b93..113dd9133f8 100644
--- a/src/mongo/s/write_ops/batch_write_op.cpp
+++ b/src/mongo/s/write_ops/batch_write_op.cpp
@@ -26,6 +26,8 @@
* it in the license file.
*/
+#include "mongo/platform/basic.h"
+
#include "mongo/s/write_ops/batch_write_op.h"
#include "mongo/base/error_codes.h"
diff --git a/src/mongo/s/write_ops/batch_write_op_test.cpp b/src/mongo/s/write_ops/batch_write_op_test.cpp
index 68dfc4deeb7..495b2ec22a0 100644
--- a/src/mongo/s/write_ops/batch_write_op_test.cpp
+++ b/src/mongo/s/write_ops/batch_write_op_test.cpp
@@ -26,45 +26,46 @@
* it in the license file.
*/
-#include "mongo/s/write_ops/batch_write_op.h"
+#include "mongo/platform/basic.h"
#include "mongo/base/owned_pointer_vector.h"
#include "mongo/db/operation_context_noop.h"
#include "mongo/s/mock_ns_targeter.h"
#include "mongo/s/write_ops/batched_command_request.h"
#include "mongo/s/write_ops/batched_delete_document.h"
+#include "mongo/s/write_ops/batch_write_op.h"
#include "mongo/s/write_ops/write_error_detail.h"
#include "mongo/unittest/unittest.h"
-namespace {
+namespace mongo {
using std::unique_ptr;
using std::string;
using std::vector;
-using namespace mongo;
+namespace {
-static void initTargeterFullRange(const NamespaceString& nss,
- const ShardEndpoint& endpoint,
- MockNSTargeter* targeter) {
+void initTargeterFullRange(const NamespaceString& nss,
+ const ShardEndpoint& endpoint,
+ MockNSTargeter* targeter) {
vector<MockRange*> mockRanges;
mockRanges.push_back(new MockRange(endpoint, nss, BSON("x" << MINKEY), BSON("x" << MAXKEY)));
targeter->init(mockRanges);
}
-static void initTargeterSplitRange(const NamespaceString& nss,
- const ShardEndpoint& endpointA,
- const ShardEndpoint& endpointB,
- MockNSTargeter* targeter) {
+void initTargeterSplitRange(const NamespaceString& nss,
+ const ShardEndpoint& endpointA,
+ const ShardEndpoint& endpointB,
+ MockNSTargeter* targeter) {
vector<MockRange*> mockRanges;
mockRanges.push_back(new MockRange(endpointA, nss, BSON("x" << MINKEY), BSON("x" << 0)));
mockRanges.push_back(new MockRange(endpointB, nss, BSON("x" << 0), BSON("x" << MAXKEY)));
targeter->init(mockRanges);
}
-static void initTargeterHalfRange(const NamespaceString& nss,
- const ShardEndpoint& endpoint,
- MockNSTargeter* targeter) {
+void initTargeterHalfRange(const NamespaceString& nss,
+ const ShardEndpoint& endpoint,
+ MockNSTargeter* targeter) {
vector<MockRange*> mockRanges;
mockRanges.push_back(new MockRange(endpoint, nss, BSON("x" << MINKEY), BSON("x" << 0)));
@@ -73,14 +74,14 @@ static void initTargeterHalfRange(const NamespaceString& nss,
targeter->init(mockRanges);
}
-static BatchedDeleteDocument* buildDelete(const BSONObj& query, int limit) {
+BatchedDeleteDocument* buildDelete(const BSONObj& query, int limit) {
BatchedDeleteDocument* deleteDoc = new BatchedDeleteDocument;
deleteDoc->setQuery(query);
deleteDoc->setLimit(limit);
return deleteDoc;
}
-static BatchedUpdateDocument* buildUpdate(const BSONObj& query, bool multi) {
+BatchedUpdateDocument* buildUpdate(const BSONObj& query, bool multi) {
BatchedUpdateDocument* updateDoc = new BatchedUpdateDocument;
updateDoc->setUpdateExpr(BSONObj());
updateDoc->setQuery(query);
@@ -88,9 +89,7 @@ static BatchedUpdateDocument* buildUpdate(const BSONObj& query, bool multi) {
return updateDoc;
}
-static BatchedUpdateDocument* buildUpdate(const BSONObj& query,
- const BSONObj& updateExpr,
- bool multi) {
+BatchedUpdateDocument* buildUpdate(const BSONObj& query, const BSONObj& updateExpr, bool multi) {
BatchedUpdateDocument* updateDoc = new BatchedUpdateDocument;
updateDoc->setQuery(query);
updateDoc->setUpdateExpr(updateExpr);
@@ -98,14 +97,14 @@ static BatchedUpdateDocument* buildUpdate(const BSONObj& query,
return updateDoc;
}
-static void buildResponse(int n, BatchedCommandResponse* response) {
+void buildResponse(int n, BatchedCommandResponse* response) {
response->clear();
response->setOk(true);
response->setN(n);
ASSERT(response->isValid(NULL));
}
-static void buildErrResponse(int code, const string& message, BatchedCommandResponse* response) {
+void buildErrResponse(int code, const string& message, BatchedCommandResponse* response) {
response->clear();
response->setOk(false);
response->setN(0);
@@ -114,7 +113,7 @@ static void buildErrResponse(int code, const string& message, BatchedCommandResp
ASSERT(response->isValid(NULL));
}
-static void addError(int code, const string& message, int index, BatchedCommandResponse* response) {
+void addError(int code, const string& message, int index, BatchedCommandResponse* response) {
unique_ptr<WriteErrorDetail> error(new WriteErrorDetail);
error->setErrCode(code);
error->setErrMessage(message);
@@ -123,7 +122,7 @@ static void addError(int code, const string& message, int index, BatchedCommandR
response->addToErrDetails(error.release());
}
-static void addWCError(BatchedCommandResponse* response) {
+void addWCError(BatchedCommandResponse* response) {
unique_ptr<WCErrorDetail> error(new WCErrorDetail);
error->setErrCode(ErrorCodes::WriteConcernFailed);
error->setErrMessage("mock wc error");
@@ -1190,7 +1189,6 @@ TEST(WriteOpTests, MultiOpErrorAndWriteConcernErrorUnordered) {
ASSERT(clientResponse.isWriteConcernErrorSet());
}
-
TEST(WriteOpTests, SingleOpErrorAndWriteConcernErrorOrdered) {
//
// Single-op (ordered) error and write concern error test
@@ -1810,5 +1808,5 @@ TEST(WriteOpLimitTests, UpdateOverheadIncluded) {
ASSERT(batchOp.isFinished());
}
-
-} // unnamed namespace
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/s/write_ops/batched_command_request.cpp b/src/mongo/s/write_ops/batched_command_request.cpp
index 539d1ddc57f..f4962354e55 100644
--- a/src/mongo/s/write_ops/batched_command_request.cpp
+++ b/src/mongo/s/write_ops/batched_command_request.cpp
@@ -261,10 +261,6 @@ void BatchedCommandRequest::setMetadata(BatchedRequestMetadata* metadata) {
INVOKE(setMetadata, metadata);
}
-void BatchedCommandRequest::unsetMetadata() {
- INVOKE(unsetMetadata);
-}
-
bool BatchedCommandRequest::isMetadataSet() const {
INVOKE(isMetadataSet);
}
diff --git a/src/mongo/s/write_ops/batched_command_request.h b/src/mongo/s/write_ops/batched_command_request.h
index 5e47e6ea32c..0325baa7ec4 100644
--- a/src/mongo/s/write_ops/batched_command_request.h
+++ b/src/mongo/s/write_ops/batched_command_request.h
@@ -28,7 +28,6 @@
#pragma once
-
#include "mongo/base/disallow_copying.h"
#include "mongo/s/write_ops/batched_insert_request.h"
#include "mongo/s/write_ops/batched_update_request.h"
@@ -130,7 +129,6 @@ public:
bool getOrdered() const;
void setMetadata(BatchedRequestMetadata* metadata);
- void unsetMetadata();
bool isMetadataSet() const;
BatchedRequestMetadata* getMetadata() const;
diff --git a/src/mongo/s/write_ops/batched_command_request_test.cpp b/src/mongo/s/write_ops/batched_command_request_test.cpp
index 55c87eb9b55..ea499957033 100644
--- a/src/mongo/s/write_ops/batched_command_request_test.cpp
+++ b/src/mongo/s/write_ops/batched_command_request_test.cpp
@@ -36,7 +36,7 @@
namespace mongo {
namespace {
-TEST(BatchedCommandRequest, BasicInsertClone) {
+TEST(BatchedCommandRequest, InsertClone) {
auto insertRequest = stdx::make_unique<BatchedInsertRequest>();
BatchedCommandRequest batchedRequest(insertRequest.release());
@@ -83,7 +83,7 @@ TEST(BatchedCommandRequest, InsertIndexClone) {
ASSERT_EQ(indexSpec, insertDocs.front());
}
-TEST(BatchedCommandRequest, BasicInsertCloneWithId) {
+TEST(BatchedCommandRequest, InsertCloneWithId) {
auto insertRequest = stdx::make_unique<BatchedInsertRequest>();
insertRequest->setOrdered(true);
insertRequest->setWriteConcern(BSON("w" << 2));
@@ -111,7 +111,7 @@ TEST(BatchedCommandRequest, BasicInsertCloneWithId) {
ASSERT_EQ(4, insertDoc["x"].numberLong());
}
-TEST(BatchedCommandRequest, BasicUpdateClone) {
+TEST(BatchedCommandRequest, UpdateClone) {
auto insertRequest = stdx::make_unique<BatchedUpdateRequest>();
BatchedCommandRequest batchedRequest(insertRequest.release());
@@ -128,7 +128,7 @@ TEST(BatchedCommandRequest, BasicUpdateClone) {
ASSERT_EQ(BSON("w" << 2), clonedRequest.getWriteConcern());
}
-TEST(BatchedCommandRequest, BasicDeleteClone) {
+TEST(BatchedCommandRequest, DeleteClone) {
auto insertRequest = stdx::make_unique<BatchedDeleteRequest>();
BatchedCommandRequest batchedRequest(insertRequest.release());
diff --git a/src/mongo/s/write_ops/batched_command_response_test.cpp b/src/mongo/s/write_ops/batched_command_response_test.cpp
index 8587fc92eb6..1934bea70ca 100644
--- a/src/mongo/s/write_ops/batched_command_response_test.cpp
+++ b/src/mongo/s/write_ops/batched_command_response_test.cpp
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2013 10gen Inc.
+ * Copyright (C) 2013-2015 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -17,35 +17,31 @@
* code of portions of this program with the OpenSSL library under certain
* conditions as described in each individual source file and distribute
* linked combinations including the program with the OpenSSL library. You
- * must comply with the GNU Affero General Public License in all respects
- * for all of the code used other than as permitted herein. If you modify
- * file(s) with this exception, you may extend this exception to your
- * version of the file(s), but you are not obligated to do so. If you do not
- * wish to do so, delete this exception statement from your version. If you
- * delete this exception statement from all source files in the program,
- * then also delete it in the license file.
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
*/
-#include "mongo/s/write_ops/batched_command_response.h"
+#include "mongo/platform/basic.h"
-#include <cstdint>
#include <string>
#include "mongo/db/jsobj.h"
+#include "mongo/s/write_ops/batched_command_response.h"
#include "mongo/s/write_ops/write_error_detail.h"
#include "mongo/unittest/unittest.h"
-namespace {
+namespace mongo {
-using mongo::BSONArray;
-using mongo::BSONObj;
-using mongo::BatchedCommandResponse;
-using mongo::WriteErrorDetail;
-using mongo::WCErrorDetail;
-using mongo::Date_t;
using std::string;
-TEST(RoundTrip, Normal) {
+namespace {
+
+TEST(BatchedCommandResponse, Basic) {
BSONArray writeErrorsArray = BSON_ARRAY(
BSON(WriteErrorDetail::index(0) << WriteErrorDetail::errCode(-2)
<< WriteErrorDetail::errInfo(BSON("more info" << 1))
@@ -75,4 +71,5 @@ TEST(RoundTrip, Normal) {
ASSERT_EQUALS(0, genResponseObj.woCompare(origResponseObj));
}
-} // unnamed namespace
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/s/write_ops/batched_delete_request.cpp b/src/mongo/s/write_ops/batched_delete_request.cpp
index 2cc5bd67236..fa5c07f6fba 100644
--- a/src/mongo/s/write_ops/batched_delete_request.cpp
+++ b/src/mongo/s/write_ops/batched_delete_request.cpp
@@ -290,10 +290,6 @@ void BatchedDeleteRequest::setMetadata(BatchedRequestMetadata* metadata) {
_metadata.reset(metadata);
}
-void BatchedDeleteRequest::unsetMetadata() {
- _metadata.reset();
-}
-
bool BatchedDeleteRequest::isMetadataSet() const {
return _metadata.get();
}
diff --git a/src/mongo/s/write_ops/batched_delete_request.h b/src/mongo/s/write_ops/batched_delete_request.h
index 4c25fb09653..44c6dbb152d 100644
--- a/src/mongo/s/write_ops/batched_delete_request.h
+++ b/src/mongo/s/write_ops/batched_delete_request.h
@@ -110,7 +110,6 @@ public:
* metadata ownership will be transferred to this.
*/
void setMetadata(BatchedRequestMetadata* metadata);
- void unsetMetadata();
bool isMetadataSet() const;
BatchedRequestMetadata* getMetadata() const;
diff --git a/src/mongo/s/write_ops/batched_delete_request_test.cpp b/src/mongo/s/write_ops/batched_delete_request_test.cpp
index c00db24f11a..87d883f1b0b 100644
--- a/src/mongo/s/write_ops/batched_delete_request_test.cpp
+++ b/src/mongo/s/write_ops/batched_delete_request_test.cpp
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2013 10gen Inc.
+ * Copyright (C) 2013-2015 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -17,37 +17,31 @@
* code of portions of this program with the OpenSSL library under certain
* conditions as described in each individual source file and distribute
* linked combinations including the program with the OpenSSL library. You
- * must comply with the GNU Affero General Public License in all respects
- * for all of the code used other than as permitted herein. If you modify
- * file(s) with this exception, you may extend this exception to your
- * version of the file(s), but you are not obligated to do so. If you do not
- * wish to do so, delete this exception statement from your version. If you
- * delete this exception statement from all source files in the program,
- * then also delete it in the license file.
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
*/
-#include "mongo/s/write_ops/batched_delete_request.h"
+#include "mongo/platform/basic.h"
#include <string>
#include "mongo/db/jsobj.h"
#include "mongo/s/write_ops/batched_delete_document.h"
+#include "mongo/s/write_ops/batched_delete_request.h"
#include "mongo/unittest/unittest.h"
-namespace {
+namespace mongo {
-using mongo::BSONArray;
-using mongo::BSONObj;
-using mongo::BatchedDeleteRequest;
-using mongo::BatchedDeleteDocument;
-using mongo::BatchedRequestMetadata;
-using mongo::BSONArrayBuilder;
-using mongo::OID;
-using mongo::Timestamp;
using std::string;
+namespace {
-TEST(RoundTrip, Normal) {
+TEST(BatchedDeleteRequest, Basic) {
BSONArray deleteArray = BSON_ARRAY(
BSON(BatchedDeleteDocument::query(BSON("a" << 1)) << BatchedDeleteDocument::limit(1))
<< BSON(BatchedDeleteDocument::query(BSON("b" << 1)) << BatchedDeleteDocument::limit(1)));
@@ -80,4 +74,5 @@ TEST(RoundTrip, Normal) {
ASSERT_EQUALS(0, genDeleteRequestObj.woCompare(origDeleteRequestObj));
}
-} // unnamed namespace
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/s/write_ops/batched_insert_request.cpp b/src/mongo/s/write_ops/batched_insert_request.cpp
index 36250a350da..e224020eaa4 100644
--- a/src/mongo/s/write_ops/batched_insert_request.cpp
+++ b/src/mongo/s/write_ops/batched_insert_request.cpp
@@ -304,10 +304,6 @@ void BatchedInsertRequest::setMetadata(BatchedRequestMetadata* metadata) {
_metadata.reset(metadata);
}
-void BatchedInsertRequest::unsetMetadata() {
- _metadata.reset();
-}
-
bool BatchedInsertRequest::isMetadataSet() const {
return _metadata.get();
}
diff --git a/src/mongo/s/write_ops/batched_insert_request.h b/src/mongo/s/write_ops/batched_insert_request.h
index 7799026aa1f..3a43aa861da 100644
--- a/src/mongo/s/write_ops/batched_insert_request.h
+++ b/src/mongo/s/write_ops/batched_insert_request.h
@@ -117,7 +117,6 @@ public:
* metadata ownership will be transferred to this.
*/
void setMetadata(BatchedRequestMetadata* metadata);
- void unsetMetadata();
bool isMetadataSet() const;
BatchedRequestMetadata* getMetadata() const;
diff --git a/src/mongo/s/write_ops/batched_insert_request_test.cpp b/src/mongo/s/write_ops/batched_insert_request_test.cpp
index acf2552e486..e31503d431f 100644
--- a/src/mongo/s/write_ops/batched_insert_request_test.cpp
+++ b/src/mongo/s/write_ops/batched_insert_request_test.cpp
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2013 10gen Inc.
+ * Copyright (C) 2013-2015 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -17,30 +17,32 @@
* code of portions of this program with the OpenSSL library under certain
* conditions as described in each individual source file and distribute
* linked combinations including the program with the OpenSSL library. You
- * must comply with the GNU Affero General Public License in all respects
- * for all of the code used other than as permitted herein. If you modify
- * file(s) with this exception, you may extend this exception to your
- * version of the file(s), but you are not obligated to do so. If you do not
- * wish to do so, delete this exception statement from your version. If you
- * delete this exception statement from all source files in the program,
- * then also delete it in the license file.
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
*/
-#include "mongo/s/write_ops/batched_insert_request.h"
+#include "mongo/platform/basic.h"
#include <string>
#include "mongo/db/jsobj.h"
-#include "mongo/unittest/unittest.h"
#include "mongo/s/write_ops/batched_command_request.h"
+#include "mongo/s/write_ops/batched_insert_request.h"
+#include "mongo/unittest/unittest.h"
-namespace {
+namespace mongo {
-using namespace mongo;
using std::unique_ptr;
using std::string;
-TEST(RoundTrip, Normal) {
+namespace {
+
+TEST(BatchedInsertRequest, Basic) {
BSONArray insertArray = BSON_ARRAY(BSON("a" << 1) << BSON("b" << 1));
BSONObj writeConcernObj = BSON("w" << 1);
@@ -71,7 +73,7 @@ TEST(RoundTrip, Normal) {
ASSERT_EQUALS(0, genInsertRequestObj.woCompare(origInsertRequestObj));
}
-TEST(GenID, All) {
+TEST(BatchedInsertRequest, GenIDAll) {
BatchedCommandRequest cmdRequest(BatchedCommandRequest::BatchType_Insert);
BatchedInsertRequest& request = *cmdRequest.getInsertRequest();
@@ -97,7 +99,7 @@ TEST(GenID, All) {
ASSERT_EQUALS(idRequest->getDocumentsAt(1).nFields(), 2);
}
-TEST(GenID, Partial) {
+TEST(BatchedInsertRequest, GenIDPartial) {
BatchedCommandRequest cmdRequest(BatchedCommandRequest::BatchType_Insert);
BatchedInsertRequest& request = *cmdRequest.getInsertRequest();
@@ -127,7 +129,7 @@ TEST(GenID, Partial) {
ASSERT_EQUALS(idRequest->getDocumentsAt(1).nFields(), 2);
}
-TEST(GenID, None) {
+TEST(BatchedInsertRequest, GenIDNone) {
BatchedCommandRequest cmdRequest(BatchedCommandRequest::BatchType_Insert);
BatchedInsertRequest& request = *cmdRequest.getInsertRequest();
@@ -144,5 +146,5 @@ TEST(GenID, None) {
ASSERT(!idCmdRequest.get());
}
-
-} // unnamed namespace
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/s/write_ops/batched_request_metadata_test.cpp b/src/mongo/s/write_ops/batched_request_metadata_test.cpp
index e0ca03231cc..f35bcee4ef9 100644
--- a/src/mongo/s/write_ops/batched_request_metadata_test.cpp
+++ b/src/mongo/s/write_ops/batched_request_metadata_test.cpp
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2013 10gen Inc.
+ * Copyright (C) 2015 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -12,25 +12,34 @@
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
*/
-#include "mongo/s/write_ops/batched_request_metadata.h"
+#include "mongo/platform/basic.h"
#include <string>
+#include "mongo/s/write_ops/batched_request_metadata.h"
#include "mongo/unittest/unittest.h"
-namespace {
+namespace mongo {
-using mongo::BSONArray;
-using mongo::BSONArrayBuilder;
-using mongo::BSONObj;
-using mongo::BatchedRequestMetadata;
-using mongo::OID;
-using mongo::Timestamp;
using std::string;
-TEST(RoundTrip, Normal) {
+namespace {
+
+TEST(BatchedRequestMetadata, Basic) {
// The BSON_ARRAY macro doesn't support Timestamps.
BSONArrayBuilder arrBuilder;
arrBuilder.append(Timestamp(1, 1));
@@ -43,11 +52,11 @@ TEST(RoundTrip, Normal) {
string errMsg;
BatchedRequestMetadata metadata;
- bool ok = metadata.parseBSON(metadataObj, &errMsg);
- ASSERT_TRUE(ok);
+ ASSERT_TRUE(metadata.parseBSON(metadataObj, &errMsg));
BSONObj genMetadataObj = metadata.toBSON();
- ASSERT_EQUALS(0, genMetadataObj.woCompare(metadataObj));
+ ASSERT_EQUALS(metadataObj, genMetadataObj);
}
-} // unnamed namespace
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/s/write_ops/batched_update_request.cpp b/src/mongo/s/write_ops/batched_update_request.cpp
index b2bb70bfa60..3baeaad2829 100644
--- a/src/mongo/s/write_ops/batched_update_request.cpp
+++ b/src/mongo/s/write_ops/batched_update_request.cpp
@@ -307,10 +307,6 @@ void BatchedUpdateRequest::setMetadata(BatchedRequestMetadata* metadata) {
_metadata.reset(metadata);
}
-void BatchedUpdateRequest::unsetMetadata() {
- _metadata.reset();
-}
-
bool BatchedUpdateRequest::isMetadataSet() const {
return _metadata.get();
}
diff --git a/src/mongo/s/write_ops/batched_update_request.h b/src/mongo/s/write_ops/batched_update_request.h
index 748c369d349..abb2dae533f 100644
--- a/src/mongo/s/write_ops/batched_update_request.h
+++ b/src/mongo/s/write_ops/batched_update_request.h
@@ -117,7 +117,6 @@ public:
* metadata ownership will be transferred to this.
*/
void setMetadata(BatchedRequestMetadata* metadata);
- void unsetMetadata();
bool isMetadataSet() const;
BatchedRequestMetadata* getMetadata() const;
diff --git a/src/mongo/s/write_ops/batched_update_request_test.cpp b/src/mongo/s/write_ops/batched_update_request_test.cpp
index a0c1f1a1ec6..04994733537 100644
--- a/src/mongo/s/write_ops/batched_update_request_test.cpp
+++ b/src/mongo/s/write_ops/batched_update_request_test.cpp
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2013 10gen Inc.
+ * Copyright (C) 2013-2015 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -17,36 +17,31 @@
* code of portions of this program with the OpenSSL library under certain
* conditions as described in each individual source file and distribute
* linked combinations including the program with the OpenSSL library. You
- * must comply with the GNU Affero General Public License in all respects
- * for all of the code used other than as permitted herein. If you modify
- * file(s) with this exception, you may extend this exception to your
- * version of the file(s), but you are not obligated to do so. If you do not
- * wish to do so, delete this exception statement from your version. If you
- * delete this exception statement from all source files in the program,
- * then also delete it in the license file.
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
*/
-#include "mongo/s/write_ops/batched_update_request.h"
+#include "mongo/platform/basic.h"
#include <string>
#include "mongo/db/jsobj.h"
#include "mongo/s/write_ops/batched_update_document.h"
+#include "mongo/s/write_ops/batched_update_request.h"
#include "mongo/unittest/unittest.h"
-namespace {
+namespace mongo {
using std::string;
-using mongo::BatchedUpdateDocument;
-using mongo::BatchedUpdateRequest;
-using mongo::BatchedRequestMetadata;
-using mongo::BSONArray;
-using mongo::BSONArrayBuilder;
-using mongo::BSONObj;
-using mongo::OID;
-using mongo::Timestamp;
-TEST(RoundTrip, Normal) {
+namespace {
+
+TEST(BatchedUpdateRequest, Basic) {
BSONArray updateArray = BSON_ARRAY(
BSON(BatchedUpdateDocument::query(BSON("a" << 1))
<< BatchedUpdateDocument::updateExpr(BSON("$set" << BSON("a" << 1)))
@@ -83,4 +78,5 @@ TEST(RoundTrip, Normal) {
ASSERT_EQUALS(0, genUpdateRequestObj.woCompare(origUpdateRequestObj));
}
-} // unnamed namespace
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/s/write_ops/write_op.cpp b/src/mongo/s/write_ops/write_op.cpp
index 24dd177cbda..25f9f13b3aa 100644
--- a/src/mongo/s/write_ops/write_op.cpp
+++ b/src/mongo/s/write_ops/write_op.cpp
@@ -26,6 +26,8 @@
* it in the license file.
*/
+#include "mongo/platform/basic.h"
+
#include "mongo/s/write_ops/write_op.h"
#include "mongo/base/error_codes.h"
diff --git a/src/mongo/s/write_ops/write_op_test.cpp b/src/mongo/s/write_ops/write_op_test.cpp
index a180fc8abdc..e488c068e47 100644
--- a/src/mongo/s/write_ops/write_op_test.cpp
+++ b/src/mongo/s/write_ops/write_op_test.cpp
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2013 MongoDB Inc.
+ * Copyright (C) 2013-2015 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -26,8 +26,7 @@
* it in the license file.
*/
-#include "mongo/s/write_ops/write_op.h"
-
+#include "mongo/platform/basic.h"
#include "mongo/base/error_codes.h"
#include "mongo/base/owned_pointer_vector.h"
@@ -36,15 +35,16 @@
#include "mongo/s/write_ops/batched_command_request.h"
#include "mongo/s/write_ops/batched_delete_document.h"
#include "mongo/s/write_ops/write_error_detail.h"
+#include "mongo/s/write_ops/write_op.h"
#include "mongo/unittest/unittest.h"
-namespace {
+namespace mongo {
using std::unique_ptr;
using std::string;
using std::vector;
-using namespace mongo;
+namespace {
WriteErrorDetail* buildError(int code, const BSONObj& info, const string& message) {
WriteErrorDetail* error = new WriteErrorDetail();
@@ -364,4 +364,5 @@ TEST(WriteOpTests, RetrySingleOp) {
ASSERT_EQUALS(writeOp.getWriteState(), WriteOpState_Ready);
}
-} // unnamed namespace
+} // namespace
+} // namespace mongo