author     Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2015-07-10 12:42:27 -0400
committer  Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2015-07-11 05:49:49 -0400
commit     48432412d79d0712367837a12637d3682c04fddb (patch)
tree       c78e1725fe7bfd1564ba89849aeaa8ac07bf4ffa /src
parent     ac42068ddeaae27d2cd5cfc4808915491e5097c7 (diff)
SERVER-18084 Move mongod metadata management to be under mongo/db
Moves the metadata management code specific to mongod under the mongo/db directory along with its tests.
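
For downstream code the visible effect is an include-path change; a minimal illustration (paths exactly as moved by this commit):

    // Before this commit:
    #include "mongo/s/collection_metadata.h"
    #include "mongo/s/metadata_loader.h"

    // After it, the mongod-specific metadata headers live under mongo/db/s:
    #include "mongo/db/s/collection_metadata.h"
    #include "mongo/db/s/metadata_loader.h"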
Diffstat (limited to 'src')
-rw-r--r--src/mongo/db/SConscript2
-rw-r--r--src/mongo/db/clientcursor.cpp2
-rw-r--r--src/mongo/db/clientcursor.h9
-rw-r--r--src/mongo/db/commands/cleanup_orphaned_cmd.cpp2
-rw-r--r--src/mongo/db/commands/create_indexes.cpp5
-rw-r--r--src/mongo/db/commands/mr.cpp2
-rw-r--r--src/mongo/db/commands/write_commands/batch_executor.cpp3
-rw-r--r--src/mongo/db/dbhelpers.cpp62
-rw-r--r--src/mongo/db/exec/shard_filter.cpp2
-rw-r--r--src/mongo/db/ops/update_lifecycle_impl.cpp10
-rw-r--r--src/mongo/db/ops/update_lifecycle_impl.h4
-rw-r--r--src/mongo/db/query/get_executor.cpp4
-rw-r--r--src/mongo/db/range_deleter.h2
-rw-r--r--src/mongo/db/s/SConscript34
-rw-r--r--src/mongo/db/s/collection_metadata.cpp (renamed from src/mongo/s/collection_metadata.cpp)43
-rw-r--r--src/mongo/db/s/collection_metadata.h (renamed from src/mongo/s/collection_metadata.h)2
-rw-r--r--src/mongo/db/s/collection_metadata_test.cpp (renamed from src/mongo/s/collection_metadata_test.cpp)10
-rw-r--r--src/mongo/db/s/metadata_loader.cpp (renamed from src/mongo/s/metadata_loader.cpp)32
-rw-r--r--src/mongo/db/s/metadata_loader.h (renamed from src/mongo/s/metadata_loader.h)0
-rw-r--r--src/mongo/db/s/metadata_loader_test.cpp (renamed from src/mongo/s/metadata_loader_test.cpp)12
-rw-r--r--src/mongo/db/s/sharding_state.cpp9
-rw-r--r--src/mongo/dbtests/dbhelper_tests.cpp3
-rw-r--r--src/mongo/dbtests/merge_chunk_tests.cpp4
-rw-r--r--src/mongo/dbtests/sharding.cpp395
-rw-r--r--src/mongo/s/SConscript49
-rw-r--r--src/mongo/s/chunk_diff.cpp97
-rw-r--r--src/mongo/s/chunk_diff.h68
-rw-r--r--src/mongo/s/chunk_diff_test.cpp381
-rw-r--r--src/mongo/s/chunk_manager.cpp11
-rw-r--r--src/mongo/s/d_merge.cpp19
-rw-r--r--src/mongo/s/d_migrate.cpp4
-rw-r--r--src/mongo/s/d_split.cpp58
-rw-r--r--src/mongo/s/d_state.cpp60
33 files changed, 694 insertions, 706 deletions
diff --git a/src/mongo/db/SConscript b/src/mongo/db/SConscript
index 08ff1f3f687..a3e7f20f1af 100644
--- a/src/mongo/db/SConscript
+++ b/src/mongo/db/SConscript
@@ -604,7 +604,6 @@ serveronlyLibdeps = [
"$BUILD_DIR/mongo/s/catalog/replset/catalog_manager_replica_set",
"$BUILD_DIR/mongo/s/client/sharding_connection_hook",
"$BUILD_DIR/mongo/s/coreshard",
- "$BUILD_DIR/mongo/s/metadata",
"$BUILD_DIR/mongo/s/serveronly",
"$BUILD_DIR/mongo/scripting/scripting_server",
"$BUILD_DIR/mongo/util/elapsed_tracker",
@@ -637,6 +636,7 @@ serveronlyLibdeps = [
"repl/rslog",
"repl/sync_tail",
"repl/topology_coordinator_impl",
+ "s/metadata",
"s/sharding",
"startup_warnings_mongod",
"stats/counters",
diff --git a/src/mongo/db/clientcursor.cpp b/src/mongo/db/clientcursor.cpp
index c0b61f99404..7b45e3444ea 100644
--- a/src/mongo/db/clientcursor.cpp
+++ b/src/mongo/db/clientcursor.cpp
@@ -26,6 +26,8 @@
* it in the license file.
*/
+#include "mongo/platform/basic.h"
+
#include "mongo/db/clientcursor.h"
#include <string>
diff --git a/src/mongo/db/clientcursor.h b/src/mongo/db/clientcursor.h
index c8bd1853768..3f7e9797ae5 100644
--- a/src/mongo/db/clientcursor.h
+++ b/src/mongo/db/clientcursor.h
@@ -31,12 +31,10 @@
#include "mongo/db/jsobj.h"
#include "mongo/db/query/plan_executor.h"
#include "mongo/db/record_id.h"
-#include "mongo/s/collection_metadata.h"
#include "mongo/util/net/message.h"
namespace mongo {
-class ClientCursor;
class Collection;
class CursorManager;
class RecoveryUnit;
@@ -290,13 +288,6 @@ private:
// TODO: Document.
uint64_t _leftoverMaxTimeMicros;
- // For chunks that are being migrated, there is a period of time when that chunks data is in
- // two shards, the donor and the receiver one. That data is picked up by a cursor on the
- // receiver side, even before the migration was decided. The CollectionMetadata allow one
- // to inquiry if any given document of the collection belongs indeed to this shard or if it
- // is coming from (or a vestige of) an ongoing migration.
- CollectionMetadataPtr _collMetadata;
-
// Only one of these is not-NULL.
RecoveryUnit* _unownedRU;
std::unique_ptr<RecoveryUnit> _ownedRU;
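
The comment removed above explains why a cursor used to hold CollectionMetadata: during a migration the same documents briefly exist on both the donor and the recipient shard, and the metadata answers whether a given document is really owned by this shard. A hedged sketch of that ownership check, using keyBelongsToMe() and the same key-extraction pattern as the dbhelpers.cpp hunk below (the metadata and doc variables are assumed to be set up by the caller):

    // Sketch only: `metadata` obtained via shardingState.getCollectionMetadata(ns),
    // `doc` is a document read from the collection.
    ShardKeyPattern kp(metadata->getKeyPattern());
    BSONObj key = kp.extractShardKeyFromDoc(doc);
    if (!metadata->keyBelongsToMe(key)) {
        // Orphan: the document is owned by another shard, e.g. a vestige of an
        // in-flight or aborted migration, so a shard-filtering cursor skips it.
    }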
diff --git a/src/mongo/db/commands/cleanup_orphaned_cmd.cpp b/src/mongo/db/commands/cleanup_orphaned_cmd.cpp
index 3af1687c790..8d7a3088bee 100644
--- a/src/mongo/db/commands/cleanup_orphaned_cmd.cpp
+++ b/src/mongo/db/commands/cleanup_orphaned_cmd.cpp
@@ -45,8 +45,8 @@
#include "mongo/db/range_arithmetic.h"
#include "mongo/db/range_deleter_service.h"
#include "mongo/db/repl/replication_coordinator_global.h"
+#include "mongo/db/s/collection_metadata.h"
#include "mongo/db/s/sharding_state.h"
-#include "mongo/s/collection_metadata.h"
#include "mongo/util/log.h"
namespace {
diff --git a/src/mongo/db/commands/create_indexes.cpp b/src/mongo/db/commands/create_indexes.cpp
index 8db9d4576b8..890d76d0929 100644
--- a/src/mongo/db/commands/create_indexes.cpp
+++ b/src/mongo/db/commands/create_indexes.cpp
@@ -47,6 +47,7 @@
#include "mongo/db/op_observer.h"
#include "mongo/db/ops/insert.h"
#include "mongo/db/repl/replication_coordinator_global.h"
+#include "mongo/db/s/collection_metadata.h"
#include "mongo/db/s/sharding_state.h"
#include "mongo/s/shard_key_pattern.h"
@@ -292,8 +293,8 @@ private:
invariant(txn->lockState()->isCollectionLockedForMode(ns, MODE_X));
if (shardingState.enabled()) {
- CollectionMetadataPtr metadata(shardingState.getCollectionMetadata(ns.toString()));
-
+ std::shared_ptr<CollectionMetadata> metadata(
+ shardingState.getCollectionMetadata(ns.toString()));
if (metadata) {
ShardKeyPattern shardKeyPattern(metadata->getKeyPattern());
if (!shardKeyPattern.isUniqueIndexCompatible(newIdxKey)) {
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index 5c6f38dbf48..6c45af50775 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -55,11 +55,11 @@
#include "mongo/db/query/query_planner.h"
#include "mongo/db/range_preserver.h"
#include "mongo/db/repl/replication_coordinator_global.h"
+#include "mongo/db/s/collection_metadata.h"
#include "mongo/db/s/sharded_connection_info.h"
#include "mongo/db/s/sharding_state.h"
#include "mongo/s/catalog/catalog_cache.h"
#include "mongo/s/chunk_manager.h"
-#include "mongo/s/collection_metadata.h"
#include "mongo/s/config.h"
#include "mongo/s/d_state.h"
#include "mongo/s/grid.h"
diff --git a/src/mongo/db/commands/write_commands/batch_executor.cpp b/src/mongo/db/commands/write_commands/batch_executor.cpp
index 428b9873133..d6475bd8cca 100644
--- a/src/mongo/db/commands/write_commands/batch_executor.cpp
+++ b/src/mongo/db/commands/write_commands/batch_executor.cpp
@@ -65,13 +65,12 @@
#include "mongo/db/repl/repl_settings.h"
#include "mongo/db/repl/replication_coordinator_global.h"
#include "mongo/db/server_parameters.h"
-#include "mongo/db/service_context.h"
+#include "mongo/db/s/collection_metadata.h"
#include "mongo/db/s/sharded_connection_info.h"
#include "mongo/db/s/sharding_state.h"
#include "mongo/db/stats/counters.h"
#include "mongo/db/stats/top.h"
#include "mongo/db/write_concern.h"
-#include "mongo/s/collection_metadata.h"
#include "mongo/s/shard_key_pattern.h"
#include "mongo/s/stale_exception.h"
#include "mongo/s/write_ops/batched_upsert_detail.h"
diff --git a/src/mongo/db/dbhelpers.cpp b/src/mongo/db/dbhelpers.cpp
index 9580fd07114..6618ef14b9b 100644
--- a/src/mongo/db/dbhelpers.cpp
+++ b/src/mongo/db/dbhelpers.cpp
@@ -1,32 +1,30 @@
-// dbhelpers.cpp
-
/**
-* Copyright (C) 2008-2014 MongoDB Inc.
-*
-* This program is free software: you can redistribute it and/or modify
-* it under the terms of the GNU Affero General Public License, version 3,
-* as published by the Free Software Foundation.
-*
-* This program is distributed in the hope that it will be useful,
-* but WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-* GNU Affero General Public License for more details.
-*
-* You should have received a copy of the GNU Affero General Public License
-* along with this program. If not, see <http://www.gnu.org/licenses/>.
-*
-* As a special exception, the copyright holders give permission to link the
-* code of portions of this program with the OpenSSL library under certain
-* conditions as described in each individual source file and distribute
-* linked combinations including the program with the OpenSSL library. You
-* must comply with the GNU Affero General Public License in all respects for
-* all of the code used other than as permitted herein. If you modify file(s)
-* with this exception, you may extend this exception to your version of the
-* file(s), but you are not obligated to do so. If you do not wish to do so,
-* delete this exception statement from your version. If you delete this
-* exception statement from all source files in the program, then also delete
-* it in the license file.
-*/
+ * Copyright (C) 2008-2014 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kDefault
@@ -61,6 +59,7 @@
#include "mongo/db/storage_options.h"
#include "mongo/db/write_concern.h"
#include "mongo/db/write_concern_options.h"
+#include "mongo/db/s/collection_metadata.h"
#include "mongo/db/s/sharding_state.h"
#include "mongo/s/shard_key_pattern.h"
#include "mongo/util/log.h"
@@ -391,10 +390,11 @@ long long Helpers::removeRange(OperationContext* txn,
// in the future we might want to.
verify(shardingState.enabled());
- // In write lock, so will be the most up-to-date version
- CollectionMetadataPtr metadataNow = shardingState.getCollectionMetadata(ns);
-
bool docIsOrphan;
+
+ // In write lock, so will be the most up-to-date version
+ std::shared_ptr<CollectionMetadata> metadataNow =
+ shardingState.getCollectionMetadata(ns);
if (metadataNow) {
ShardKeyPattern kp(metadataNow->getKeyPattern());
BSONObj key = kp.extractShardKeyFromDoc(obj);
diff --git a/src/mongo/db/exec/shard_filter.cpp b/src/mongo/db/exec/shard_filter.cpp
index 8b28453caf8..26fe05144dd 100644
--- a/src/mongo/db/exec/shard_filter.cpp
+++ b/src/mongo/db/exec/shard_filter.cpp
@@ -35,7 +35,7 @@
#include "mongo/db/exec/filter.h"
#include "mongo/db/exec/scoped_timer.h"
#include "mongo/db/exec/working_set_common.h"
-#include "mongo/s/collection_metadata.h"
+#include "mongo/db/s/collection_metadata.h"
#include "mongo/s/shard_key_pattern.h"
#include "mongo/stdx/memory.h"
#include "mongo/util/log.h"
diff --git a/src/mongo/db/ops/update_lifecycle_impl.cpp b/src/mongo/db/ops/update_lifecycle_impl.cpp
index b003169edf2..fe0b9710f30 100644
--- a/src/mongo/db/ops/update_lifecycle_impl.cpp
+++ b/src/mongo/db/ops/update_lifecycle_impl.cpp
@@ -34,20 +34,24 @@
#include "mongo/db/catalog/database.h"
#include "mongo/db/field_ref.h"
#include "mongo/db/catalog/collection.h"
+#include "mongo/db/s/collection_metadata.h"
#include "mongo/db/s/sharding_state.h"
#include "mongo/s/chunk_version.h"
namespace mongo {
+
namespace {
-CollectionMetadataPtr getMetadata(const NamespaceString& nsString) {
+
+std::shared_ptr<CollectionMetadata> getMetadata(const NamespaceString& nsString) {
if (shardingState.enabled()) {
return shardingState.getCollectionMetadata(nsString.ns());
}
- return CollectionMetadataPtr();
-}
+ return nullptr;
}
+} // namespace
+
UpdateLifecycleImpl::UpdateLifecycleImpl(bool ignoreVersion, const NamespaceString& nsStr)
: _nsString(nsStr),
_shardVersion((!ignoreVersion && getMetadata(_nsString))
diff --git a/src/mongo/db/ops/update_lifecycle_impl.h b/src/mongo/db/ops/update_lifecycle_impl.h
index 114ebd72ba2..d7e5616f4eb 100644
--- a/src/mongo/db/ops/update_lifecycle_impl.h
+++ b/src/mongo/db/ops/update_lifecycle_impl.h
@@ -31,10 +31,12 @@
#include "mongo/base/disallow_copying.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/ops/update_lifecycle.h"
-#include "mongo/db/catalog/collection.h"
+#include "mongo/s/chunk_version.h"
namespace mongo {
+class Collection;
+
class UpdateLifecycleImpl : public UpdateLifecycle {
MONGO_DISALLOW_COPYING(UpdateLifecycleImpl);
diff --git a/src/mongo/db/query/get_executor.cpp b/src/mongo/db/query/get_executor.cpp
index 86bcc3beddf..bd32c486c53 100644
--- a/src/mongo/db/query/get_executor.cpp
+++ b/src/mongo/db/query/get_executor.cpp
@@ -70,6 +70,7 @@
#include "mongo/db/server_options.h"
#include "mongo/db/server_parameters.h"
#include "mongo/db/service_context.h"
+#include "mongo/db/s/collection_metadata.h"
#include "mongo/db/s/sharding_state.h"
#include "mongo/db/storage_options.h"
#include "mongo/db/storage/oplog_hack.h"
@@ -168,9 +169,8 @@ void fillOutPlannerParams(OperationContext* txn,
// If the caller wants a shard filter, make sure we're actually sharded.
if (plannerParams->options & QueryPlannerParams::INCLUDE_SHARD_FILTER) {
- CollectionMetadataPtr collMetadata =
+ std::shared_ptr<CollectionMetadata> collMetadata =
shardingState.getCollectionMetadata(canonicalQuery->ns());
-
if (collMetadata) {
plannerParams->shardKey = collMetadata->getKeyPattern();
} else {
diff --git a/src/mongo/db/range_deleter.h b/src/mongo/db/range_deleter.h
index f58625f8083..39f30e17a62 100644
--- a/src/mongo/db/range_deleter.h
+++ b/src/mongo/db/range_deleter.h
@@ -37,7 +37,7 @@
#include "mongo/base/string_data.h"
#include "mongo/db/clientcursor.h"
#include "mongo/db/jsobj.h"
-#include "mongo/db/operation_context.h"
+#include "mongo/db/range_arithmetic.h"
#include "mongo/db/write_concern_options.h"
#include "mongo/stdx/mutex.h"
#include "mongo/stdx/thread.h"
diff --git a/src/mongo/db/s/SConscript b/src/mongo/db/s/SConscript
index 1ed15c8bfac..ada4ca88974 100644
--- a/src/mongo/db/s/SConscript
+++ b/src/mongo/db/s/SConscript
@@ -3,6 +3,22 @@
Import("env")
env.Library(
+ target='metadata',
+ source=[
+ 'collection_metadata.cpp',
+ 'metadata_loader.cpp',
+ ],
+ LIBDEPS=[
+ '$BUILD_DIR/mongo/base/base',
+ '$BUILD_DIR/mongo/bson/bson',
+ '$BUILD_DIR/mongo/db/common',
+ '$BUILD_DIR/mongo/db/range_arithmetic',
+ '$BUILD_DIR/mongo/s/catalog/catalog_types',
+ '$BUILD_DIR/mongo/s/common',
+ ]
+)
+
+env.Library(
target='sharding',
source=[
'sharded_connection_info.cpp',
@@ -15,3 +31,21 @@ env.Library(
'$BUILD_DIR/mongo/db/common',
]
)
+
+env.CppUnitTest(
+ target='metadata_test',
+ source=[
+ 'metadata_loader_test.cpp',
+ 'collection_metadata_test.cpp',
+ ],
+ LIBDEPS=[
+ 'metadata',
+ '$BUILD_DIR/mongo/db/auth/authorization_manager_mock_init',
+ '$BUILD_DIR/mongo/db/common',
+ '$BUILD_DIR/mongo/db/coredb',
+ '$BUILD_DIR/mongo/dbtests/mocklib',
+ '$BUILD_DIR/mongo/s/catalog/legacy/catalog_manager_legacy',
+ '$BUILD_DIR/mongo/s/coreshard',
+ '$BUILD_DIR/mongo/s/mongoscore',
+ ]
+)
\ No newline at end of file
diff --git a/src/mongo/s/collection_metadata.cpp b/src/mongo/db/s/collection_metadata.cpp
index 05d448dc239..b8c9cfa7b7a 100644
--- a/src/mongo/s/collection_metadata.cpp
+++ b/src/mongo/db/s/collection_metadata.cpp
@@ -30,25 +30,24 @@
#include "mongo/platform/basic.h"
-#include "mongo/s/collection_metadata.h"
+#include "mongo/db/s/collection_metadata.h"
#include "mongo/bson/util/builder.h"
+#include "mongo/s/catalog/type_chunk.h"
#include "mongo/util/log.h"
#include "mongo/util/mongoutils/str.h"
namespace mongo {
using std::unique_ptr;
-using std::endl;
using std::make_pair;
using std::string;
using std::vector;
+using str::stream;
-using mongoutils::str::stream;
+CollectionMetadata::CollectionMetadata() = default;
-CollectionMetadata::CollectionMetadata() {}
-
-CollectionMetadata::~CollectionMetadata() {}
+CollectionMetadata::~CollectionMetadata() = default;
CollectionMetadata* CollectionMetadata::cloneMigrate(const ChunkType& chunk,
const ChunkVersion& newShardVersion,
@@ -72,7 +71,7 @@ CollectionMetadata* CollectionMetadata::cloneMigrate(const ChunkType& chunk,
*errMsg += stream() << " and it overlaps " << overlapToString(overlap);
}
- warning() << *errMsg << endl;
+ warning() << *errMsg;
return NULL;
}
@@ -83,7 +82,7 @@ CollectionMetadata* CollectionMetadata::cloneMigrate(const ChunkType& chunk,
<< newShardVersion.toString() << " when removing last chunk "
<< rangeToString(chunk.getMin(), chunk.getMax());
- warning() << *errMsg << endl;
+ warning() << *errMsg;
return NULL;
}
}
@@ -96,7 +95,7 @@ CollectionMetadata* CollectionMetadata::cloneMigrate(const ChunkType& chunk,
<< " is not greater than the current shard version "
<< _shardVersion.toString();
- warning() << *errMsg << endl;
+ warning() << *errMsg;
return NULL;
}
@@ -130,7 +129,7 @@ CollectionMetadata* CollectionMetadata::clonePlusChunk(const ChunkType& chunk,
*errMsg = stream() << "cannot add chunk " << rangeToString(chunk.getMin(), chunk.getMax())
<< " with zero shard version";
- warning() << *errMsg << endl;
+ warning() << *errMsg;
return NULL;
}
@@ -144,7 +143,7 @@ CollectionMetadata* CollectionMetadata::clonePlusChunk(const ChunkType& chunk,
*errMsg = stream() << "cannot add chunk " << rangeToString(chunk.getMin(), chunk.getMax())
<< " because the chunk overlaps " << overlapToString(overlap);
- warning() << *errMsg << endl;
+ warning() << *errMsg;
return NULL;
}
@@ -184,7 +183,7 @@ CollectionMetadata* CollectionMetadata::cloneMinusPending(const ChunkType& pendi
*errMsg += stream() << " and it overlaps " << overlapToString(overlap);
}
- warning() << *errMsg << endl;
+ warning() << *errMsg;
return NULL;
}
@@ -219,7 +218,7 @@ CollectionMetadata* CollectionMetadata::clonePlusPending(const ChunkType& pendin
<< rangeToString(pending.getMin(), pending.getMax())
<< " because the chunk overlaps " << overlapToString(overlap);
- warning() << *errMsg << endl;
+ warning() << *errMsg;
return NULL;
}
@@ -245,7 +244,7 @@ CollectionMetadata* CollectionMetadata::clonePlusPending(const ChunkType& pendin
warning() << "new pending chunk " << rangeToString(pending.getMin(), pending.getMax())
<< " overlaps existing pending chunks " << overlapToString(pendingOverlap)
- << ", a migration may not have completed" << endl;
+ << ", a migration may not have completed";
for (RangeVector::iterator it = pendingOverlap.begin(); it != pendingOverlap.end(); ++it) {
metadata->_pendingMap.erase(it->first);
@@ -281,7 +280,7 @@ CollectionMetadata* CollectionMetadata::cloneSplit(const ChunkType& chunk,
<< ", new shard version " << newShardVersion.toString()
<< " is not greater than current version " << _shardVersion.toString();
- warning() << *errMsg << endl;
+ warning() << *errMsg;
return NULL;
}
@@ -297,7 +296,7 @@ CollectionMetadata* CollectionMetadata::cloneSplit(const ChunkType& chunk,
*errMsg += stream() << " and it overlaps " << overlapToString(overlap);
}
- warning() << *errMsg << endl;
+ warning() << *errMsg;
return NULL;
}
@@ -308,7 +307,7 @@ CollectionMetadata* CollectionMetadata::cloneSplit(const ChunkType& chunk,
<< rangeToString(chunk.getMin(), chunk.getMax()) << " at key "
<< *it;
- warning() << *errMsg << endl;
+ warning() << *errMsg;
return NULL;
}
}
@@ -348,7 +347,7 @@ CollectionMetadata* CollectionMetadata::cloneMerge(const BSONObj& minKey,
<< ", new shard version " << newShardVersion.toString()
<< " is not greater than current version " << _shardVersion.toString();
- warning() << *errMsg << endl;
+ warning() << *errMsg;
return NULL;
}
@@ -360,7 +359,7 @@ CollectionMetadata* CollectionMetadata::cloneMerge(const BSONObj& minKey,
<< (overlap.empty() ? ", no chunks found in this range"
: ", only one chunk found in this range");
- warning() << *errMsg << endl;
+ warning() << *errMsg;
return NULL;
}
@@ -390,7 +389,7 @@ CollectionMetadata* CollectionMetadata::cloneMerge(const BSONObj& minKey,
<< (!validStartEnd ? " do not have the same min and max key"
: " are not all adjacent");
- warning() << *errMsg << endl;
+ warning() << *errMsg;
return NULL;
}
@@ -436,10 +435,10 @@ bool CollectionMetadata::keyBelongsToMe(const BSONObj& key) const {
// Logs if the point doesn't belong here.
if ( !good ) {
log() << "bad: " << key << " " << it->first << " " << key.woCompare( it->first ) << " "
- << key.woCompare( it->second ) << endl;
+ << key.woCompare( it->second );
for ( RangeMap::const_iterator i = _rangesMap.begin(); i != _rangesMap.end(); ++i ) {
- log() << "\t" << i->first << "\t" << i->second << "\t" << endl;
+ log() << "\t" << i->first << "\t" << i->second << "\t";
}
}
#endif
diff --git a/src/mongo/s/collection_metadata.h b/src/mongo/db/s/collection_metadata.h
index 5f3ae419dbd..0955394747d 100644
--- a/src/mongo/s/collection_metadata.h
+++ b/src/mongo/db/s/collection_metadata.h
@@ -34,11 +34,11 @@
#include "mongo/db/field_ref_set.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/range_arithmetic.h"
-#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/chunk_version.h"
namespace mongo {
+class ChunkType;
class MetadataLoader;
class CollectionMetadata;
diff --git a/src/mongo/s/collection_metadata_test.cpp b/src/mongo/db/s/collection_metadata_test.cpp
index 8fe16a8d5f5..3df8e8b82f7 100644
--- a/src/mongo/s/collection_metadata_test.cpp
+++ b/src/mongo/db/s/collection_metadata_test.cpp
@@ -32,21 +32,20 @@
#include <vector>
#include "mongo/db/jsobj.h"
+#include "mongo/db/s/collection_metadata.h"
+#include "mongo/db/s/metadata_loader.h"
#include "mongo/dbtests/mock/mock_conn_registry.h"
#include "mongo/dbtests/mock/mock_remote_db_server.h"
#include "mongo/s/catalog/legacy/catalog_manager_legacy.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/catalog/type_collection.h"
#include "mongo/s/chunk_version.h"
-#include "mongo/s/collection_metadata.h"
-#include "mongo/s/metadata_loader.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/net/hostandport.h"
+namespace mongo {
namespace {
-using namespace mongo;
-
using std::make_pair;
using std::string;
using std::unique_ptr;
@@ -1318,4 +1317,5 @@ TEST_F(ThreeChunkWithRangeGapFixture, CannotMergeWithHole) {
ASSERT(!errMsg.empty());
}
-} // unnamed namespace
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/s/metadata_loader.cpp b/src/mongo/db/s/metadata_loader.cpp
index 4bb156f156d..f6b72823cf2 100644
--- a/src/mongo/s/metadata_loader.cpp
+++ b/src/mongo/db/s/metadata_loader.cpp
@@ -30,34 +30,35 @@
#include "mongo/platform/basic.h"
-#include "mongo/s/metadata_loader.h"
+#include "mongo/db/s/metadata_loader.h"
#include <vector>
+#include "mongo/db/s/collection_metadata.h"
#include "mongo/s/catalog/catalog_manager.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/catalog/type_collection.h"
#include "mongo/s/chunk_diff.h"
#include "mongo/s/chunk_version.h"
-#include "mongo/s/collection_metadata.h"
#include "mongo/util/log.h"
namespace mongo {
using std::unique_ptr;
-using std::endl;
using std::make_pair;
using std::map;
using std::pair;
using std::string;
+namespace {
+
/**
* This is an adapter so we can use config diffs - mongos and mongod do them slightly
* differently.
*
* The mongod adapter here tracks only a single shard, and stores ranges by (min, max).
*/
-class SCMConfigDiffTracker : public ConfigDiffTracker<BSONObj, string> {
+class SCMConfigDiffTracker : public ConfigDiffTracker<BSONObj> {
public:
SCMConfigDiffTracker(const string& currShard) : _currShard(currShard) {}
@@ -77,16 +78,19 @@ public:
return shard;
}
- string _currShard;
+private:
+ const string _currShard;
};
+} // namespace
+
//
// MetadataLoader implementation
//
-MetadataLoader::MetadataLoader() {}
+MetadataLoader::MetadataLoader() = default;
-MetadataLoader::~MetadataLoader() {}
+MetadataLoader::~MetadataLoader() = default;
Status MetadataLoader::makeCollectionMetadata(CatalogManager* catalogManager,
const string& ns,
@@ -153,11 +157,11 @@ Status MetadataLoader::initChunks(CatalogManager* catalogManager,
LOG(2) << "loading new chunks for collection " << ns
<< " using old metadata w/ version " << oldMetadata->getShardVersion() << " and "
- << metadata->_chunksMap.size() << " chunks" << endl;
+ << metadata->_chunksMap.size() << " chunks";
} else {
warning() << "reloading collection metadata for " << ns << " with new epoch "
<< epoch.toString() << ", the current epoch is "
- << oldMetadata->getCollVersion().epoch().toString() << endl;
+ << oldMetadata->getCollVersion().epoch().toString();
}
}
@@ -169,9 +173,9 @@ Status MetadataLoader::initChunks(CatalogManager* catalogManager,
try {
std::vector<ChunkType> chunks;
- Query diffQuery = differ.configDiffQuery();
- Status status = catalogManager->getChunks(
- diffQuery.getFilter(), diffQuery.getSort(), boost::none, &chunks);
+ const auto diffQuery = differ.configDiffQuery();
+ Status status =
+ catalogManager->getChunks(diffQuery.query, diffQuery.sort, boost::none, &chunks);
if (!status.isOK()) {
if (status == ErrorCodes::HostUnreachable) {
// Make our metadata invalid
@@ -207,7 +211,7 @@ Status MetadataLoader::initChunks(CatalogManager* catalogManager,
<< "no chunks found when reloading " << ns << ", previous version was "
<< metadata->_collVersion.toString() << (fullReload ? ", this is a drop" : "");
- warning() << errMsg << endl;
+ warning() << errMsg;
metadata->_collVersion = ChunkVersion(0, 0, OID());
metadata->_chunksMap.clear();
@@ -263,7 +267,7 @@ Status MetadataLoader::promotePendingChunks(const CollectionMetadata* afterMetad
if (rangeMapContains(remoteMetadata->_chunksMap, it->first, it->second)) {
// Chunk was promoted from pending, successful migration
LOG(2) << "verified chunk " << rangeToString(it->first, it->second)
- << " was migrated earlier to this shard" << endl;
+ << " was migrated earlier to this shard";
remoteMetadata->_pendingMap.erase(it++);
} else {
diff --git a/src/mongo/s/metadata_loader.h b/src/mongo/db/s/metadata_loader.h
index 15ca227926e..15ca227926e 100644
--- a/src/mongo/s/metadata_loader.h
+++ b/src/mongo/db/s/metadata_loader.h
diff --git a/src/mongo/s/metadata_loader_test.cpp b/src/mongo/db/s/metadata_loader_test.cpp
index d38a29d2cf5..2d2adb706f2 100644
--- a/src/mongo/s/metadata_loader_test.cpp
+++ b/src/mongo/db/s/metadata_loader_test.cpp
@@ -35,20 +35,19 @@
#include "mongo/client/connpool.h"
#include "mongo/client/dbclientinterface.h"
#include "mongo/db/jsobj.h"
+#include "mongo/db/s/collection_metadata.h"
+#include "mongo/db/s/metadata_loader.h"
#include "mongo/dbtests/mock/mock_conn_registry.h"
#include "mongo/dbtests/mock/mock_remote_db_server.h"
#include "mongo/s/catalog/legacy/catalog_manager_legacy.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/catalog/type_collection.h"
-#include "mongo/s/collection_metadata.h"
-#include "mongo/s/metadata_loader.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/net/hostandport.h"
+namespace mongo {
namespace {
-using namespace mongo;
-
using std::unique_ptr;
using std::string;
using std::vector;
@@ -920,5 +919,6 @@ TEST_F(MultipleMetadataFixture, PromotePendingBadOverlap) {
vector<ChunkVersion> _maxShardVersion;
};
#endif
-}
-// unnamed namespace
+
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/db/s/sharding_state.cpp b/src/mongo/db/s/sharding_state.cpp
index f6442c69744..dbcabce83c1 100644
--- a/src/mongo/db/s/sharding_state.cpp
+++ b/src/mongo/db/s/sharding_state.cpp
@@ -37,16 +37,17 @@
#include "mongo/db/concurrency/lock_state.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/repl/replication_executor.h"
+#include "mongo/db/s/collection_metadata.h"
+#include "mongo/db/s/metadata_loader.h"
#include "mongo/db/s/sharded_connection_info.h"
#include "mongo/executor/network_interface_factory.h"
#include "mongo/executor/task_executor.h"
#include "mongo/s/catalog/catalog_manager.h"
+#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/catalog/legacy/catalog_manager_legacy.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/chunk_version.h"
-#include "mongo/s/collection_metadata.h"
#include "mongo/s/grid.h"
-#include "mongo/s/metadata_loader.h"
#include "mongo/util/log.h"
#include "mongo/util/mongoutils/str.h"
#include "mongo/util/net/sock.h"
@@ -60,6 +61,10 @@ using std::vector;
// Global sharding state instance
ShardingState shardingState;
+bool isMongos() {
+ return false;
+}
+
ShardingState::ShardingState()
: _enabled(false),
_configServerTickets(3 /* max number of concurrent config server refresh threads */) {}
diff --git a/src/mongo/dbtests/dbhelper_tests.cpp b/src/mongo/dbtests/dbhelper_tests.cpp
index 18dc87428ce..ca4280ada04 100644
--- a/src/mongo/dbtests/dbhelper_tests.cpp
+++ b/src/mongo/dbtests/dbhelper_tests.cpp
@@ -26,6 +26,8 @@
* then also delete it in the license file.
*/
+#include "mongo/platform/basic.h"
+
#include "mongo/client/dbclientcursor.h"
#include "mongo/db/catalog/collection.h"
#include "mongo/db/catalog/database_holder.h"
@@ -33,6 +35,7 @@
#include "mongo/db/dbdirectclient.h"
#include "mongo/db/dbhelpers.h"
#include "mongo/db/operation_context_impl.h"
+#include "mongo/db/range_arithmetic.h"
#include "mongo/db/write_concern_options.h"
#include "mongo/dbtests/dbtests.h"
#include "mongo/unittest/unittest.h"
diff --git a/src/mongo/dbtests/merge_chunk_tests.cpp b/src/mongo/dbtests/merge_chunk_tests.cpp
index 16e010edf27..a7b51248418 100644
--- a/src/mongo/dbtests/merge_chunk_tests.cpp
+++ b/src/mongo/dbtests/merge_chunk_tests.cpp
@@ -29,13 +29,13 @@
#include "mongo/platform/basic.h"
#include "mongo/db/range_arithmetic.h"
+#include "mongo/db/s/collection_metadata.h"
#include "mongo/db/s/sharding_state.h"
#include "mongo/dbtests/config_server_fixture.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/catalog/type_collection.h"
-#include "mongo/s/chunk.h" // for genID
+#include "mongo/s/chunk.h"
#include "mongo/s/chunk_version.h"
-#include "mongo/s/collection_metadata.h"
#include "mongo/s/d_merge.h"
#include "mongo/unittest/unittest.h"
diff --git a/src/mongo/dbtests/sharding.cpp b/src/mongo/dbtests/sharding.cpp
index 4b15938625a..f5cfb7a8476 100644
--- a/src/mongo/dbtests/sharding.cpp
+++ b/src/mongo/dbtests/sharding.cpp
@@ -30,18 +30,14 @@
#include "mongo/platform/basic.h"
-#include "mongo/client/parallel.h"
-#include "mongo/db/dbdirectclient.h"
#include "mongo/db/operation_context_impl.h"
#include "mongo/dbtests/config_server_fixture.h"
#include "mongo/dbtests/dbtests.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/catalog/type_collection.h"
#include "mongo/s/catalog/type_shard.h"
-#include "mongo/s/chunk_diff.h"
+#include "mongo/s/client/shard_connection.h"
#include "mongo/s/chunk_manager.h"
-#include "mongo/s/chunk_version.h"
-#include "mongo/s/config.h"
namespace {
@@ -68,18 +64,6 @@ static int rand(int max = -1) {
}
//
-// Converts array of raw BSONObj chunks to a vector of ChunkType
-//
-void convertBSONArrayToChunkTypes(const BSONArray& chunksArray,
- std::vector<ChunkType>* chunksVector) {
- for (const BSONElement& obj : chunksArray) {
- auto chunkTypeRes = ChunkType::fromBSON(obj.Obj());
- ASSERT(chunkTypeRes.isOK());
- chunksVector->push_back(chunkTypeRes.getValue());
- }
-}
-
-//
// Sets up a basic environment for loading chunks to/from the direct database connection
// Redirects connections to the direct database for the duration of the test.
//
@@ -283,381 +267,6 @@ public:
}
};
-class ChunkDiffUnitTest {
-public:
- bool _inverse;
-
- typedef map<BSONObj, BSONObj, BSONObjCmp> RangeMap;
- typedef map<string, ChunkVersion> VersionMap;
-
- ChunkDiffUnitTest(bool inverse) : _inverse(inverse) {}
-
- // The default pass-through adapter for using config diffs
- class DefaultDiffAdapter : public ConfigDiffTracker<BSONObj, string> {
- public:
- DefaultDiffAdapter() {}
- virtual ~DefaultDiffAdapter() {}
-
- virtual bool isTracked(const ChunkType& chunk) const {
- return true;
- }
-
- virtual pair<BSONObj, BSONObj> rangeFor(const ChunkType& chunk) const {
- return make_pair(chunk.getMin(), chunk.getMax());
- }
-
- virtual string shardFor(const string& name) const {
- return name;
- }
- };
-
- // Inverts the storage order for chunks from min to max
- class InverseDiffAdapter : public DefaultDiffAdapter {
- public:
- InverseDiffAdapter() {}
- virtual ~InverseDiffAdapter() {}
-
- virtual bool isMinKeyIndexed() const {
- return false;
- }
-
- virtual pair<BSONObj, BSONObj> rangeFor(const ChunkType& chunk) const {
- return make_pair(chunk.getMax(), chunk.getMin());
- }
- };
-
- // Allow validating with and without ranges (b/c our splits won't actually be updated by the
- // diffs)
- void validate(const std::vector<ChunkType>& chunks,
- ChunkVersion maxVersion,
- const VersionMap& maxShardVersions) {
- validate(chunks, NULL, maxVersion, maxShardVersions);
- }
-
- void validate(const std::vector<ChunkType>& chunks,
- const RangeMap& ranges,
- ChunkVersion maxVersion,
- const VersionMap& maxShardVersions) {
- validate(chunks, (RangeMap*)&ranges, maxVersion, maxShardVersions);
- }
-
- // Validates that the ranges and versions are valid given the chunks
- void validate(const std::vector<ChunkType>& chunks,
- RangeMap* ranges,
- ChunkVersion maxVersion,
- const VersionMap& maxShardVersions) {
- int chunkCount = chunks.size();
- ChunkVersion foundMaxVersion;
- VersionMap foundMaxShardVersions;
-
- //
- // Validate that all the chunks are there and collect versions
- //
-
- for (const ChunkType& chunk : chunks) {
- if (ranges != NULL) {
- // log() << "Validating chunk " << chunkDoc << " size : " << ranges->size() << " vs
- // " << chunkCount << endl;
-
- RangeMap::iterator chunkRange =
- ranges->find(_inverse ? chunk.getMax() : chunk.getMin());
-
- ASSERT(chunkRange != ranges->end());
- ASSERT(chunkRange->second.woCompare(_inverse ? chunk.getMin() : chunk.getMax()) ==
- 0);
- }
-
- ChunkVersion version =
- ChunkVersion::fromBSON(chunk.toBSON()[ChunkType::DEPRECATED_lastmod()]);
- if (version > foundMaxVersion)
- foundMaxVersion = version;
-
- ChunkVersion shardMaxVersion = foundMaxShardVersions[chunk.getShard()];
- if (version > shardMaxVersion) {
- foundMaxShardVersions[chunk.getShard()] = version;
- }
- }
-
- // Make sure all chunks are accounted for
- if (ranges != NULL)
- ASSERT(chunkCount == (int)ranges->size());
-
- // log() << "Validating that all shard versions are up to date..." << endl;
-
- // Validate that all the versions are the same
- ASSERT(foundMaxVersion.equals(maxVersion));
-
- for (VersionMap::iterator it = foundMaxShardVersions.begin();
- it != foundMaxShardVersions.end();
- it++) {
- ChunkVersion foundVersion = it->second;
- VersionMap::const_iterator maxIt = maxShardVersions.find(it->first);
-
- ASSERT(maxIt != maxShardVersions.end());
- ASSERT(foundVersion.equals(maxIt->second));
- }
- // Make sure all shards are accounted for
- ASSERT(foundMaxShardVersions.size() == maxShardVersions.size());
- }
-
- void run() {
- int numShards = 10;
- int numInitialChunks = 5;
- int maxChunks = 100000; // Needed to not overflow the BSONArray's max bytes
- int keySize = 2;
-
- BSONArrayBuilder chunksB;
-
- BSONObj lastSplitPt;
- ChunkVersion version(1, 0, OID());
-
- //
- // Generate numChunks with a given key size over numShards
- // All chunks have double key values, so we can split them a bunch
- //
-
- for (int i = -1; i < numInitialChunks; i++) {
- BSONObjBuilder splitPtB;
- for (int k = 0; k < keySize; k++) {
- string field = string("k") + string(1, (char)('0' + k));
- if (i < 0)
- splitPtB.appendMinKey(field);
- else if (i < numInitialChunks - 1)
- splitPtB.append(field, (double)i);
- else
- splitPtB.appendMaxKey(field);
- }
- BSONObj splitPt = splitPtB.obj();
-
- if (i >= 0) {
- BSONObjBuilder chunkB;
-
- chunkB.append(ChunkType::name(), "$dummyname");
- chunkB.append(ChunkType::ns(), "$dummyns");
-
- chunkB.append(ChunkType::min(), lastSplitPt);
- chunkB.append(ChunkType::max(), splitPt);
-
- int shardNum = rand(numShards);
- chunkB.append(ChunkType::shard(), "shard" + string(1, (char)('A' + shardNum)));
-
- rand(2) ? version.incMajor() : version.incMinor();
- version.addToBSON(chunkB, ChunkType::DEPRECATED_lastmod());
-
- chunksB.append(chunkB.obj());
- }
-
- lastSplitPt = splitPt;
- }
-
- BSONArray chunks = chunksB.arr();
-
- // log() << "Chunks generated : " << chunks << endl;
-
- // Setup the empty ranges and versions first
- RangeMap ranges;
- ChunkVersion maxVersion = ChunkVersion(0, 0, OID());
- VersionMap maxShardVersions;
-
- // Create a differ which will track our progress
- std::shared_ptr<DefaultDiffAdapter> differ(_inverse ? new InverseDiffAdapter()
- : new DefaultDiffAdapter());
- differ->attach("test", ranges, maxVersion, maxShardVersions);
-
- std::vector<ChunkType> chunksVector;
- convertBSONArrayToChunkTypes(chunks, &chunksVector);
-
- // Validate initial load
- differ->calculateConfigDiff(chunksVector);
- validate(chunksVector, ranges, maxVersion, maxShardVersions);
-
- // Generate a lot of diffs, and keep validating that updating from the diffs always
- // gives us the right ranges and versions
-
- int numDiffs = 135; // Makes about 100000 chunks overall
- int numChunks = numInitialChunks;
- for (int i = 0; i < numDiffs; i++) {
- // log() << "Generating new diff... " << i << endl;
-
- BSONArrayBuilder diffsB;
- BSONArrayBuilder newChunksB;
- BSONObjIterator chunksIt(chunks);
-
- while (chunksIt.more()) {
- BSONObj chunk = chunksIt.next().Obj();
-
- int randChoice = rand(10);
-
- if (randChoice < 2 && numChunks < maxChunks) {
- // Simulate a split
-
- // log() << " ...starting a split with chunk " << chunk << endl;
-
- BSONObjBuilder leftB;
- BSONObjBuilder rightB;
- BSONObjBuilder midB;
-
- for (int k = 0; k < keySize; k++) {
- string field = string("k") + string(1, (char)('0' + k));
-
- BSONType maxType = chunk[ChunkType::max()].Obj()[field].type();
- double max =
- maxType == NumberDouble ? chunk["max"].Obj()[field].Number() : 0.0;
- BSONType minType = chunk[ChunkType::min()].Obj()[field].type();
- double min = minType == NumberDouble
- ? chunk[ChunkType::min()].Obj()[field].Number()
- : 0.0;
-
- if (minType == MinKey) {
- midB.append(field, max - 1.0);
- } else if (maxType == MaxKey) {
- midB.append(field, min + 1.0);
- } else {
- midB.append(field, (max + min) / 2.0);
- }
- }
-
- BSONObj midPt = midB.obj();
- // Only happens if we can't split the min chunk
- if (midPt.isEmpty())
- continue;
-
- leftB.append(chunk[ChunkType::min()]);
- leftB.append(ChunkType::max(), midPt);
- rightB.append(ChunkType::min(), midPt);
- rightB.append(chunk[ChunkType::max()]);
-
- // add required fields for ChunkType
- leftB.append(chunk[ChunkType::name()]);
- leftB.append(chunk[ChunkType::ns()]);
- rightB.append(chunk[ChunkType::name()]);
- rightB.append(chunk[ChunkType::ns()]);
-
- leftB.append(chunk[ChunkType::shard()]);
- rightB.append(chunk[ChunkType::shard()]);
-
- version.incMajor();
- version._minor = 0;
- version.addToBSON(leftB, ChunkType::DEPRECATED_lastmod());
- version.incMinor();
- version.addToBSON(rightB, ChunkType::DEPRECATED_lastmod());
-
- BSONObj left = leftB.obj();
- BSONObj right = rightB.obj();
-
- // log() << " ... split into " << left << " and " << right << endl;
-
- newChunksB.append(left);
- newChunksB.append(right);
-
- diffsB.append(right);
- diffsB.append(left);
-
- numChunks++;
- } else if (randChoice < 4 && chunksIt.more()) {
- // Simulate a migrate
-
- // log() << " ...starting a migrate with chunk " << chunk << endl;
-
- BSONObj prevShardChunk;
- while (chunksIt.more()) {
- prevShardChunk = chunksIt.next().Obj();
- if (prevShardChunk[ChunkType::shard()].String() ==
- chunk[ChunkType::shard()].String())
- break;
-
- // log() << "... appending chunk from diff shard: " << prevShardChunk <<
- // endl;
- newChunksB.append(prevShardChunk);
-
- prevShardChunk = BSONObj();
- }
-
- // We need to move between different shards, hence the weirdness in logic here
- if (!prevShardChunk.isEmpty()) {
- BSONObjBuilder newShardB;
- BSONObjBuilder prevShardB;
-
- newShardB.append(chunk[ChunkType::min()]);
- newShardB.append(chunk[ChunkType::max()]);
- prevShardB.append(prevShardChunk[ChunkType::min()]);
- prevShardB.append(prevShardChunk[ChunkType::max()]);
-
- // add required fields for ChunkType
- newShardB.append(chunk[ChunkType::name()]);
- newShardB.append(chunk[ChunkType::ns()]);
- prevShardB.append(chunk[ChunkType::name()]);
- prevShardB.append(chunk[ChunkType::ns()]);
-
- int shardNum = rand(numShards);
- newShardB.append(ChunkType::shard(),
- "shard" + string(1, (char)('A' + shardNum)));
- prevShardB.append(prevShardChunk[ChunkType::shard()]);
-
- version.incMajor();
- version._minor = 0;
- version.addToBSON(newShardB, ChunkType::DEPRECATED_lastmod());
- version.incMinor();
- version.addToBSON(prevShardB, ChunkType::DEPRECATED_lastmod());
-
- BSONObj newShard = newShardB.obj();
- BSONObj prevShard = prevShardB.obj();
-
- // log() << " ... migrated to " << newShard << " and updated " << prevShard
- // << endl;
-
- newChunksB.append(newShard);
- newChunksB.append(prevShard);
-
- diffsB.append(newShard);
- diffsB.append(prevShard);
-
- } else {
- // log() << "... appending chunk, no more left: " << chunk << endl;
- newChunksB.append(chunk);
- }
- } else {
- // log() << "Appending chunk : " << chunk << endl;
- newChunksB.append(chunk);
- }
- }
-
- BSONArray diffs = diffsB.arr();
- chunks = newChunksB.arr();
-
- // log() << "Diffs generated : " << diffs << endl;
- // log() << "All chunks : " << chunks << endl;
-
- // Rarely entirely clear out our data
- if (rand(10) < 1) {
- diffs = chunks;
- ranges.clear();
- maxVersion = ChunkVersion(0, 0, OID());
- maxShardVersions.clear();
- }
-
- // log() << "Total number of chunks : " << numChunks << " iteration " << i << endl;
-
- std::vector<ChunkType> chunksVector;
- convertBSONArrayToChunkTypes(chunks, &chunksVector);
-
- differ->calculateConfigDiff(chunksVector);
-
- validate(chunksVector, ranges, maxVersion, maxShardVersions);
- }
- }
-};
-
-class ChunkDiffUnitTestNormal : public ChunkDiffUnitTest {
-public:
- ChunkDiffUnitTestNormal() : ChunkDiffUnitTest(false) {}
-};
-
-class ChunkDiffUnitTestInverse : public ChunkDiffUnitTest {
-public:
- ChunkDiffUnitTestInverse() : ChunkDiffUnitTest(true) {}
-};
-
class All : public Suite {
public:
All() : Suite("sharding") {}
@@ -666,8 +275,6 @@ public:
add<ChunkManagerCreateBasicTest>();
add<ChunkManagerCreateFullTest>();
add<ChunkManagerLoadBasicTest>();
- add<ChunkDiffUnitTestNormal>();
- add<ChunkDiffUnitTestInverse>();
}
};
diff --git a/src/mongo/s/SConscript b/src/mongo/s/SConscript
index 1c054ceb35c..8cd2540322a 100644
--- a/src/mongo/s/SConscript
+++ b/src/mongo/s/SConscript
@@ -11,6 +11,17 @@ env.SConscript(
],
)
+# Functionality shared between mongod and mongos
+env.Library(
+ target='common',
+ source=[
+ 'chunk_diff.cpp',
+ ],
+ LIBDEPS=[
+ 'catalog/catalog_types',
+ ]
+)
+
env.Library(
target='shard_util',
source=[
@@ -21,45 +32,25 @@ env.Library(
]
)
-env.CppUnitTest('chunk_version_test', 'chunk_version_test.cpp',
- LIBDEPS=['$BUILD_DIR/mongo/db/common'])
-
-#
-# Support for maintaining persistent sharding state and data.
-#
-
-env.Library(
- target='metadata',
+env.CppUnitTest(
+ target='chunk_version_test',
source=[
- 'collection_metadata.cpp',
- 'metadata_loader.cpp',
+ 'chunk_version_test.cpp',
],
LIBDEPS=[
- 'catalog/catalog_types',
- '$BUILD_DIR/mongo/bson/bson',
- '$BUILD_DIR/mongo/base/base',
- '$BUILD_DIR/mongo/client/clientdriver',
- '$BUILD_DIR/mongo/db/range_arithmetic',
+ '$BUILD_DIR/mongo/db/common',
]
)
env.CppUnitTest(
- target='metadata_test',
+ target='chunk_diff_test',
source=[
'chunk_diff_test.cpp',
- 'metadata_loader_test.cpp',
- 'collection_metadata_test.cpp',
],
LIBDEPS=[
- 'catalog/legacy/catalog_manager_legacy',
- 'coreshard',
- 'metadata',
- 'mongoscore',
- '$BUILD_DIR/mongo/db/coredb',
- '$BUILD_DIR/mongo/db/auth/authorization_manager_mock_init',
- '$BUILD_DIR/mongo/dbtests/mocklib',
- '$BUILD_DIR/mongo/db/common',
- ])
+ 'common',
+ ]
+)
#
# Write Operations
@@ -211,7 +202,6 @@ env.Library(
# This is only here temporarily for auto-split logic in chunk.cpp.
'balancer_policy.cpp',
'chunk.cpp',
- 'chunk_diff.cpp',
'chunk_manager.cpp',
'config.cpp',
'grid.cpp',
@@ -223,6 +213,7 @@ env.Library(
'catalog/catalog_types',
'client/sharding_client',
'cluster_ops_impl',
+ 'common',
'shard_util',
]
)
diff --git a/src/mongo/s/chunk_diff.cpp b/src/mongo/s/chunk_diff.cpp
index 3d6901aa8c2..e3b0f9fa2f0 100644
--- a/src/mongo/s/chunk_diff.cpp
+++ b/src/mongo/s/chunk_diff.cpp
@@ -32,17 +32,15 @@
#include "mongo/s/chunk_diff.h"
-#include "mongo/client/dbclientinterface.h"
#include "mongo/s/catalog/type_chunk.h"
-#include "mongo/s/catalog/catalog_manager.h"
-#include "mongo/s/chunk.h"
#include "mongo/s/chunk_version.h"
#include "mongo/util/log.h"
+#include "mongo/util/mongoutils/str.h"
namespace mongo {
-template <class ValType, class ShardType>
-ConfigDiffTracker<ValType, ShardType>::ConfigDiffTracker() {
+template <class ValType>
+ConfigDiffTracker<ValType>::ConfigDiffTracker() {
_ns.clear();
_currMap = NULL;
_maxVersion = NULL;
@@ -50,14 +48,14 @@ ConfigDiffTracker<ValType, ShardType>::ConfigDiffTracker() {
_validDiffs = 0;
}
-template <class ValType, class ShardType>
-ConfigDiffTracker<ValType, ShardType>::~ConfigDiffTracker() = default;
+template <class ValType>
+ConfigDiffTracker<ValType>::~ConfigDiffTracker() = default;
-template <class ValType, class ShardType>
-void ConfigDiffTracker<ValType, ShardType>::attach(const std::string& ns,
- RangeMap& currMap,
- ChunkVersion& maxVersion,
- MaxChunkVersionMap& maxShardVersions) {
+template <class ValType>
+void ConfigDiffTracker<ValType>::attach(const std::string& ns,
+ RangeMap& currMap,
+ ChunkVersion& maxVersion,
+ MaxChunkVersionMap& maxShardVersions) {
_ns = ns;
_currMap = &currMap;
_maxVersion = &maxVersion;
@@ -65,16 +63,15 @@ void ConfigDiffTracker<ValType, ShardType>::attach(const std::string& ns,
_validDiffs = 0;
}
-template <class ValType, class ShardType>
-bool ConfigDiffTracker<ValType, ShardType>::isOverlapping(const BSONObj& min, const BSONObj& max) {
+template <class ValType>
+bool ConfigDiffTracker<ValType>::isOverlapping(const BSONObj& min, const BSONObj& max) {
RangeOverlap overlap = overlappingRange(min, max);
return overlap.first != overlap.second;
}
-template <class ValType, class ShardType>
-void ConfigDiffTracker<ValType, ShardType>::removeOverlapping(const BSONObj& min,
- const BSONObj& max) {
+template <class ValType>
+void ConfigDiffTracker<ValType>::removeOverlapping(const BSONObj& min, const BSONObj& max) {
_assertAttached();
RangeOverlap overlap = overlappingRange(min, max);
@@ -82,9 +79,9 @@ void ConfigDiffTracker<ValType, ShardType>::removeOverlapping(const BSONObj& min
_currMap->erase(overlap.first, overlap.second);
}
-template <class ValType, class ShardType>
-typename ConfigDiffTracker<ValType, ShardType>::RangeOverlap
-ConfigDiffTracker<ValType, ShardType>::overlappingRange(const BSONObj& min, const BSONObj& max) {
+template <class ValType>
+typename ConfigDiffTracker<ValType>::RangeOverlap ConfigDiffTracker<ValType>::overlappingRange(
+ const BSONObj& min, const BSONObj& max) {
_assertAttached();
typename RangeMap::iterator low;
@@ -111,29 +108,8 @@ ConfigDiffTracker<ValType, ShardType>::overlappingRange(const BSONObj& min, cons
return RangeOverlap(low, high);
}
-template <class ValType, class ShardType>
-int ConfigDiffTracker<ValType, ShardType>::calculateConfigDiff(CatalogManager* catalogManager) {
- _assertAttached();
-
- // Get the diff query required
- Query diffQuery = configDiffQuery();
-
- try {
- std::vector<ChunkType> chunks;
- uassertStatusOK(catalogManager->getChunks(
- diffQuery.getFilter(), diffQuery.getSort(), boost::none, &chunks));
-
- return calculateConfigDiff(chunks);
- } catch (DBException& e) {
- // Should only happen on connection errors
- e.addContext(str::stream() << "could not calculate config difference for ns " << _ns);
- throw;
- }
-}
-
-template <class ValType, class ShardType>
-int ConfigDiffTracker<ValType, ShardType>::calculateConfigDiff(
- const std::vector<ChunkType>& chunks) {
+template <class ValType>
+int ConfigDiffTracker<ValType>::calculateConfigDiff(const std::vector<ChunkType>& chunks) {
_assertAttached();
// Apply the chunk changes to the ranges and versions
@@ -173,7 +149,7 @@ int ConfigDiffTracker<ValType, ShardType>::calculateConfigDiff(
}
// Chunk version changes
- ShardType shard = shardFor(chunk.getShard());
+ ShardId shard = shardFor(chunk.getShard());
typename MaxChunkVersionMap::const_iterator shardVersionIt = _maxShardVersions->find(shard);
if (shardVersionIt == _maxShardVersions->end() || shardVersionIt->second < chunkVersion) {
@@ -211,12 +187,13 @@ int ConfigDiffTracker<ValType, ShardType>::calculateConfigDiff(
return _validDiffs;
}
-template <class ValType, class ShardType>
-Query ConfigDiffTracker<ValType, ShardType>::configDiffQuery() const {
+template <class ValType>
+typename ConfigDiffTracker<ValType>::QueryAndSort ConfigDiffTracker<ValType>::configDiffQuery()
+ const {
_assertAttached();
- // Basic idea behind the query is to find all the chunks $gte the current max version.
- // Currently, any splits and merges will increment the current max version.
+ // The query has to find all the chunks $gte the current max version. Currently, any splits and
+ // merges will increment the current max version.
BSONObjBuilder queryB;
queryB.append(ChunkType::ns(), _ns);
@@ -226,14 +203,13 @@ Query ConfigDiffTracker<ValType, ShardType>::configDiffQuery() const {
tsBuilder.done();
}
- // NOTE: IT IS IMPORTANT FOR CONSISTENCY THAT WE SORT BY ASC VERSION, IN ORDER TO HANDLE
- // CURSOR YIELDING BETWEEN CHUNKS BEING MIGRATED.
+ // NOTE: IT IS IMPORTANT FOR CONSISTENCY THAT WE SORT BY ASC VERSION, IN ORDER TO HANDLE CURSOR
+ // YIELDING BETWEEN CHUNKS BEING MIGRATED.
//
- // This ensures that changes to chunk version (which will always be higher) will always
- // come *after* our current position in the chunk cursor.
+ // This ensures that changes to chunk version (which will always be higher) will always come
+ // *after* our current position in the chunk cursor.
- Query queryObj(queryB.obj());
- queryObj.sort(BSON("lastmod" << 1));
+ QueryAndSort queryObj(queryB.obj(), BSON("lastmod" << 1));
LOG(2) << "major version query from " << *_maxVersion << " and over "
<< _maxShardVersions->size() << " shards is " << queryObj;
@@ -241,16 +217,21 @@ Query ConfigDiffTracker<ValType, ShardType>::configDiffQuery() const {
return queryObj;
}
-template <class ValType, class ShardType>
-void ConfigDiffTracker<ValType, ShardType>::_assertAttached() const {
+template <class ValType>
+void ConfigDiffTracker<ValType>::_assertAttached() const {
invariant(_currMap);
invariant(_maxVersion);
invariant(_maxShardVersions);
}
+std::string ConfigDiffTrackerBase::QueryAndSort::toString() const {
+ return str::stream() << "query: " << query << ", sort: " << sort;
+}
// Ensures that these instances of the template are compiled
-template class ConfigDiffTracker<BSONObj, std::string>;
-template class ConfigDiffTracker<std::shared_ptr<Chunk>, std::string>;
+class Chunk;
+
+template class ConfigDiffTracker<BSONObj>;
+template class ConfigDiffTracker<std::shared_ptr<Chunk>>;
} // namespace mongo
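
With the mongo::Query dependency gone, callers consume the plain query/sort pair. A minimal caller-side sketch, mirroring the metadata_loader.cpp hunk above (the differ and catalogManager objects are assumed to be set up as in that file):

    const auto diffQuery = differ.configDiffQuery();
    std::vector<ChunkType> chunks;
    Status status =
        catalogManager->getChunks(diffQuery.query, diffQuery.sort, boost::none, &chunks);
    if (status.isOK()) {
        // Returns the number of diffs applied, or -1 if the diffs were inconsistent.
        int diffsApplied = differ.calculateConfigDiff(chunks);
    }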
diff --git a/src/mongo/s/chunk_diff.h b/src/mongo/s/chunk_diff.h
index 00d713addd5..ce53a95a043 100644
--- a/src/mongo/s/chunk_diff.h
+++ b/src/mongo/s/chunk_diff.h
@@ -32,29 +32,43 @@
#include "mongo/bson/bsonmisc.h"
#include "mongo/bson/bsonobj.h"
+#include "mongo/s/client/shard.h"
namespace mongo {
class ChunkType;
struct ChunkVersion;
-class CatalogManager;
-class Query;
+
+class ConfigDiffTrackerBase {
+public:
+ /**
+ * Structure representing the generated query and sort order for a chunk diffing operation.
+ */
+ struct QueryAndSort {
+ QueryAndSort(BSONObj inQuery, BSONObj inSort) : query(inQuery), sort(inSort) {}
+
+ std::string toString() const;
+
+ const BSONObj query;
+ const BSONObj sort;
+ };
+};
/**
- * This class manages and applies diffs from partial config server data reloads. Because the
- * config data can be large, we want to update it in small parts, not all-at-once. Once a
- * ConfigDiffTracker is created, the current config data is *attached* to it, and it is then
- * able to modify the data.
+ * This class manages and applies diffs from partial config server data reloads. Because the config
+ * data can be large, we want to update it in small parts, not all-at-once. Once a
+ * ConfigDiffTracker is created, the current config data is *attached* to it, and it is then able
+ * to modify the data.
*
- * The current form is templated b/c the overall algorithm is identical between mongos and
- * mongod, but the actual chunk maps used differ in implementation. We don't want to copy the
- * implementation, because the logic is identical, or the chunk data, because that would be
- * slow for big clusters, so this is the alternative for now.
+ * The current form is templated b/c the overall algorithm is identical between mongos and mongod,
+ * but the actual chunk maps used differ in implementation. We don't want to copy the
+ * implementation, because the logic is identical, or the chunk data, because that would be slow
+ * for big clusters, so this is the alternative for now.
*
* TODO: Standardize between mongos and mongod and convert template parameters to types.
*/
-template <class ValType, class ShardType>
-class ConfigDiffTracker {
+template <class ValType>
+class ConfigDiffTracker : public ConfigDiffTrackerBase {
public:
// Stores ranges indexed by max or min key
typedef typename std::map<BSONObj, ValType, BSONObjCmp> RangeMap;
@@ -64,8 +78,7 @@ public:
typename std::pair<typename RangeMap::iterator, typename RangeMap::iterator> RangeOverlap;
// Map of shard identifiers to the maximum chunk version on that shard
- typedef typename std::map<ShardType, ChunkVersion> MaxChunkVersionMap;
-
+ typedef typename std::map<ShardId, ChunkVersion> MaxChunkVersionMap;
ConfigDiffTracker();
virtual ~ConfigDiffTracker();
@@ -97,35 +110,32 @@ public:
// Returns a subset of ranges overlapping the region min/max
RangeOverlap overlappingRange(const BSONObj& min, const BSONObj& max);
- // Finds and applies the changes to a collection from the config servers via
- // the catalog manager.
- // Also includes minor version changes for particular major-version chunks if explicitly
- // specified.
- // Returns the number of diffs processed, or -1 if the diffs were inconsistent
- // Throws a DBException on connection errors
- int calculateConfigDiff(CatalogManager* catalogManager);
-
- // Applies changes to the config data from a vector of chunks passed in
- // Returns the number of diffs processed, or -1 if the diffs were inconsistent
- // Throws a DBException on connection errors
+ // Applies changes to the config data from a vector of chunks passed in. Also includes minor
+ // version changes for particular major-version chunks if explicitly specified.
+ // Returns the number of diffs processed, or -1 if the diffs were inconsistent.
int calculateConfigDiff(const std::vector<ChunkType>& chunks);
// Returns the query needed to find new changes to a collection from the config server
// Needed only if a custom connection is required to the config server
- Query configDiffQuery() const;
+ QueryAndSort configDiffQuery() const;
protected:
- // Determines which chunks are actually being remembered by our RangeMap
+ /**
+ * Determines which chunks are actually being remembered by our RangeMap. Allows individual
+ * shards to filter the results down to only those chunks that belong to the local shard.
+ */
virtual bool isTracked(const ChunkType& chunk) const = 0;
- // Whether or not our RangeMap uses min or max keys
+ /**
+ * Whether our RangeMap is indexed by the min key (the default) or the max key of each range.
+ */
virtual bool isMinKeyIndexed() const {
return true;
}
virtual std::pair<BSONObj, ValType> rangeFor(const ChunkType& chunk) const = 0;
- virtual ShardType shardFor(const std::string& name) const = 0;
+ virtual ShardId shardFor(const std::string& name) const = 0;
private:
void _assertAttached() const;
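
// A minimal sketch (illustrative only, not part of this commit) of how the refactored
// single-parameter tracker is meant to be subclassed. The LocalShardDiffAdapter name and the
// _thisShard member are hypothetical; ConfigDiffTracker<ValType>, ShardId, and the
// isTracked/rangeFor/shardFor hooks are the ones declared in the hunk above.
class LocalShardDiffAdapter : public ConfigDiffTracker<BSONObj> {
public:
    explicit LocalShardDiffAdapter(const ShardId& thisShard) : _thisShard(thisShard) {}

    // Only remember chunks that live on this shard
    virtual bool isTracked(const ChunkType& chunk) const {
        return chunk.getShard() == _thisShard;
    }

    // Index ranges by min key; isMinKeyIndexed() defaults to true
    virtual std::pair<BSONObj, BSONObj> rangeFor(const ChunkType& chunk) const {
        return std::make_pair(chunk.getMin(), chunk.getMax());
    }

    virtual ShardId shardFor(const std::string& name) const {
        return name;
    }

private:
    const ShardId _thisShard;
};

// Usage would follow the two-step flow this patch introduces: ask the tracker what to fetch,
// fetch through whatever channel the caller owns, then hand the chunks back:
//
//   differ.attach(ns, ranges, maxVersion, maxShardVersions);
//   auto diffQuery = differ.configDiffQuery();
//   // fetch std::vector<ChunkType> chunks using diffQuery.query / diffQuery.sort
//   int diffsApplied = differ.calculateConfigDiff(chunks);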
diff --git a/src/mongo/s/chunk_diff_test.cpp b/src/mongo/s/chunk_diff_test.cpp
index 067b80ac0f9..b6ecd3f84f4 100644
--- a/src/mongo/s/chunk_diff_test.cpp
+++ b/src/mongo/s/chunk_diff_test.cpp
@@ -26,32 +26,34 @@
* then also delete it in the license file.
*/
+#include "mongo/platform/basic.h"
+
#include "mongo/s/chunk_diff.h"
#include <string>
+#include <map>
#include <utility>
#include "mongo/db/jsobj.h"
+#include "mongo/platform/random.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/unittest/unittest.h"
+namespace mongo {
namespace {
-using mongo::BSONObj;
-using mongo::ChunkType;
-using mongo::ConfigDiffTracker;
using std::string;
using std::pair;
using std::make_pair;
+using std::map;
-// XXX
-// We'd move ChunkDiffUnitTest here
-// We can check the queries it generates.
-// We can check if is populating the attaching structures properly
-//
+// Generates pseudorandom values
+PseudoRandom rand(1);
-// The default pass-through adapter for using config diffs.
-class DefaultDiffAdapter : public ConfigDiffTracker<BSONObj, string> {
+/**
+ * The default pass-through adapter for using config diffs.
+ */
+class DefaultDiffAdapter : public ConfigDiffTracker<BSONObj> {
public:
DefaultDiffAdapter() {}
virtual ~DefaultDiffAdapter() {}
@@ -64,14 +66,363 @@ public:
return make_pair(chunk.getMin(), chunk.getMax());
}
- virtual string shardFor(const string& name) const {
+ virtual ShardId shardFor(const string& name) const {
return name;
}
};
-TEST(Basics, Simple) {
- DefaultDiffAdapter differ;
- ASSERT_TRUE(true);
+/**
+ * Inverts the storage order for chunks from min to max.
+ */
+class InverseDiffAdapter : public DefaultDiffAdapter {
+public:
+ InverseDiffAdapter() {}
+ virtual ~InverseDiffAdapter() {}
+
+ virtual bool isMinKeyIndexed() const {
+ return false;
+ }
+
+ virtual pair<BSONObj, BSONObj> rangeFor(const ChunkType& chunk) const {
+ return make_pair(chunk.getMax(), chunk.getMin());
+ }
+};
+
+/**
+ * Converts array of raw BSONObj chunks to a vector of ChunkType.
+ */
+void convertBSONArrayToChunkTypes(const BSONArray& chunksArray,
+ std::vector<ChunkType>* chunksVector) {
+ for (const BSONElement& obj : chunksArray) {
+ auto chunkTypeRes = ChunkType::fromBSON(obj.Obj());
+ ASSERT(chunkTypeRes.isOK());
+ chunksVector->push_back(chunkTypeRes.getValue());
+ }
+}
+
+class ChunkDiffUnitTest : public mongo::unittest::Test {
+protected:
+ typedef map<BSONObj, BSONObj, BSONObjCmp> RangeMap;
+ typedef map<string, ChunkVersion> VersionMap;
+
+ ChunkDiffUnitTest() = default;
+ ~ChunkDiffUnitTest() = default;
+
+ void runTest(bool isInverse) {
+ int numShards = 10;
+ int numInitialChunks = 5;
+
+ // Needed to avoid overflowing the BSONArray's maximum size
+ int maxChunks = 100000;
+ int keySize = 2;
+
+ BSONArrayBuilder chunksB;
+
+ BSONObj lastSplitPt;
+ ChunkVersion version(1, 0, OID());
+
+ // Generate numInitialChunks chunks with a given key size across numShards shards. All
+ // chunks have double key values, so we can split them repeatedly.
+
+ for (int i = -1; i < numInitialChunks; i++) {
+ BSONObjBuilder splitPtB;
+ for (int k = 0; k < keySize; k++) {
+ string field = string("k") + string(1, (char)('0' + k));
+ if (i < 0)
+ splitPtB.appendMinKey(field);
+ else if (i < numInitialChunks - 1)
+ splitPtB.append(field, (double)i);
+ else
+ splitPtB.appendMaxKey(field);
+ }
+ BSONObj splitPt = splitPtB.obj();
+
+ if (i >= 0) {
+ BSONObjBuilder chunkB;
+
+ chunkB.append(ChunkType::name(), "$dummyname");
+ chunkB.append(ChunkType::ns(), "$dummyns");
+
+ chunkB.append(ChunkType::min(), lastSplitPt);
+ chunkB.append(ChunkType::max(), splitPt);
+
+ int shardNum = rand(numShards);
+ chunkB.append(ChunkType::shard(), "shard" + string(1, (char)('A' + shardNum)));
+
+ rand(2) ? version.incMajor() : version.incMinor();
+ version.addToBSON(chunkB, ChunkType::DEPRECATED_lastmod());
+
+ chunksB.append(chunkB.obj());
+ }
+
+ lastSplitPt = splitPt;
+ }
+
+ BSONArray chunks = chunksB.arr();
+
+ // Set up the empty ranges and versions first
+ RangeMap ranges;
+ ChunkVersion maxVersion = ChunkVersion(0, 0, OID());
+ VersionMap maxShardVersions;
+
+ // Create a differ which will track our progress
+ std::shared_ptr<DefaultDiffAdapter> differ(isInverse ? new InverseDiffAdapter()
+ : new DefaultDiffAdapter());
+ differ->attach("test", ranges, maxVersion, maxShardVersions);
+
+ std::vector<ChunkType> chunksVector;
+ convertBSONArrayToChunkTypes(chunks, &chunksVector);
+
+ // Validate initial load
+ differ->calculateConfigDiff(chunksVector);
+ validate(isInverse, chunksVector, ranges, maxVersion, maxShardVersions);
+
+ // Generate a lot of diffs, and keep validating that updating from the diffs always gives us
+ // the right ranges and versions
+
+ // Makes about 100000 chunks overall
+ int numDiffs = 135;
+ int numChunks = numInitialChunks;
+
+ for (int i = 0; i < numDiffs; i++) {
+ BSONArrayBuilder diffsB;
+ BSONArrayBuilder newChunksB;
+ BSONObjIterator chunksIt(chunks);
+
+ while (chunksIt.more()) {
+ BSONObj chunk = chunksIt.next().Obj();
+
+ int randChoice = rand(10);
+
+ if (randChoice < 2 && numChunks < maxChunks) {
+ // Simulate a split
+ BSONObjBuilder leftB;
+ BSONObjBuilder rightB;
+ BSONObjBuilder midB;
+
+ for (int k = 0; k < keySize; k++) {
+ string field = string("k") + string(1, (char)('0' + k));
+
+ BSONType maxType = chunk[ChunkType::max()].Obj()[field].type();
+ double max =
+ maxType == NumberDouble ? chunk["max"].Obj()[field].Number() : 0.0;
+ BSONType minType = chunk[ChunkType::min()].Obj()[field].type();
+ double min = minType == NumberDouble
+ ? chunk[ChunkType::min()].Obj()[field].Number()
+ : 0.0;
+
+ if (minType == MinKey) {
+ midB.append(field, max - 1.0);
+ } else if (maxType == MaxKey) {
+ midB.append(field, min + 1.0);
+ } else {
+ midB.append(field, (max + min) / 2.0);
+ }
+ }
+
+ BSONObj midPt = midB.obj();
+
+ // Only happens if we can't split the min chunk
+ if (midPt.isEmpty()) {
+ continue;
+ }
+
+ leftB.append(chunk[ChunkType::min()]);
+ leftB.append(ChunkType::max(), midPt);
+ rightB.append(ChunkType::min(), midPt);
+ rightB.append(chunk[ChunkType::max()]);
+
+ // Add required fields for ChunkType
+ leftB.append(chunk[ChunkType::name()]);
+ leftB.append(chunk[ChunkType::ns()]);
+ rightB.append(chunk[ChunkType::name()]);
+ rightB.append(chunk[ChunkType::ns()]);
+
+ leftB.append(chunk[ChunkType::shard()]);
+ rightB.append(chunk[ChunkType::shard()]);
+
+ version.incMajor();
+ version._minor = 0;
+ version.addToBSON(leftB, ChunkType::DEPRECATED_lastmod());
+ version.incMinor();
+ version.addToBSON(rightB, ChunkType::DEPRECATED_lastmod());
+
+ BSONObj left = leftB.obj();
+ BSONObj right = rightB.obj();
+
+ newChunksB.append(left);
+ newChunksB.append(right);
+
+ diffsB.append(right);
+ diffsB.append(left);
+
+ numChunks++;
+ } else if (randChoice < 4 && chunksIt.more()) {
+ // Simulate a migrate
+ BSONObj prevShardChunk;
+ while (chunksIt.more()) {
+ prevShardChunk = chunksIt.next().Obj();
+
+ if (prevShardChunk[ChunkType::shard()].String() ==
+ chunk[ChunkType::shard()].String()) {
+ break;
+ }
+
+ newChunksB.append(prevShardChunk);
+
+ prevShardChunk = BSONObj();
+ }
+
+ // We need to move between different shards, hence the extra logic here
+ if (!prevShardChunk.isEmpty()) {
+ BSONObjBuilder newShardB;
+ BSONObjBuilder prevShardB;
+
+ newShardB.append(chunk[ChunkType::min()]);
+ newShardB.append(chunk[ChunkType::max()]);
+ prevShardB.append(prevShardChunk[ChunkType::min()]);
+ prevShardB.append(prevShardChunk[ChunkType::max()]);
+
+ // Add required fields for ChunkType
+ newShardB.append(chunk[ChunkType::name()]);
+ newShardB.append(chunk[ChunkType::ns()]);
+ prevShardB.append(chunk[ChunkType::name()]);
+ prevShardB.append(chunk[ChunkType::ns()]);
+
+ int shardNum = rand(numShards);
+ newShardB.append(ChunkType::shard(),
+ "shard" + string(1, (char)('A' + shardNum)));
+ prevShardB.append(prevShardChunk[ChunkType::shard()]);
+
+ version.incMajor();
+ version._minor = 0;
+ version.addToBSON(newShardB, ChunkType::DEPRECATED_lastmod());
+ version.incMinor();
+ version.addToBSON(prevShardB, ChunkType::DEPRECATED_lastmod());
+
+ BSONObj newShard = newShardB.obj();
+ BSONObj prevShard = prevShardB.obj();
+
+ newChunksB.append(newShard);
+ newChunksB.append(prevShard);
+
+ diffsB.append(newShard);
+ diffsB.append(prevShard);
+
+ } else {
+ newChunksB.append(chunk);
+ }
+ } else {
+ newChunksB.append(chunk);
+ }
+ }
+
+ BSONArray diffs = diffsB.arr();
+ chunks = newChunksB.arr();
+
+ // Rarely entirely clear out our data
+ if (rand(10) < 1) {
+ diffs = chunks;
+ ranges.clear();
+ maxVersion = ChunkVersion(0, 0, OID());
+ maxShardVersions.clear();
+ }
+
+ std::vector<ChunkType> chunksVector;
+ convertBSONArrayToChunkTypes(chunks, &chunksVector);
+
+ differ->calculateConfigDiff(chunksVector);
+
+ validate(isInverse, chunksVector, ranges, maxVersion, maxShardVersions);
+ }
+ }
+
+private:
+ // Allow validating with and without ranges (because our splits won't actually be updated
+ // by the diffs)
+ void validate(bool isInverse,
+ const std::vector<ChunkType>& chunks,
+ ChunkVersion maxVersion,
+ const VersionMap& maxShardVersions) {
+ validate(isInverse, chunks, NULL, maxVersion, maxShardVersions);
+ }
+
+ void validate(bool isInverse,
+ const std::vector<ChunkType>& chunks,
+ const RangeMap& ranges,
+ ChunkVersion maxVersion,
+ const VersionMap& maxShardVersions) {
+ validate(isInverse, chunks, (RangeMap*)&ranges, maxVersion, maxShardVersions);
+ }
+
+ // Validates the ranges and versions against the given chunks
+ void validate(bool isInverse,
+ const std::vector<ChunkType>& chunks,
+ RangeMap* ranges,
+ ChunkVersion maxVersion,
+ const VersionMap& maxShardVersions) {
+ int chunkCount = chunks.size();
+ ChunkVersion foundMaxVersion;
+ VersionMap foundMaxShardVersions;
+
+ //
+ // Validate that all the chunks are there and collect versions
+ //
+
+ for (const ChunkType& chunk : chunks) {
+ if (ranges != NULL) {
+ // log() << "Validating chunk " << chunkDoc << " size : " << ranges->size() << " vs
+ // " << chunkCount << endl;
+
+ RangeMap::iterator chunkRange =
+ ranges->find(isInverse ? chunk.getMax() : chunk.getMin());
+
+ ASSERT(chunkRange != ranges->end());
+ ASSERT(chunkRange->second.woCompare(isInverse ? chunk.getMin() : chunk.getMax()) ==
+ 0);
+ }
+
+ ChunkVersion version =
+ ChunkVersion::fromBSON(chunk.toBSON()[ChunkType::DEPRECATED_lastmod()]);
+ if (version > foundMaxVersion)
+ foundMaxVersion = version;
+
+ ChunkVersion shardMaxVersion = foundMaxShardVersions[chunk.getShard()];
+ if (version > shardMaxVersion) {
+ foundMaxShardVersions[chunk.getShard()] = version;
+ }
+ }
+
+ // Make sure all chunks are accounted for
+ if (ranges != NULL)
+ ASSERT(chunkCount == (int)ranges->size());
+
+ // log() << "Validating that all shard versions are up to date..." << endl;
+
+ // Validate that all the versions are the same
+ ASSERT(foundMaxVersion.equals(maxVersion));
+
+ for (VersionMap::iterator it = foundMaxShardVersions.begin();
+ it != foundMaxShardVersions.end();
+ it++) {
+ ChunkVersion foundVersion = it->second;
+ VersionMap::const_iterator maxIt = maxShardVersions.find(it->first);
+
+ ASSERT(maxIt != maxShardVersions.end());
+ ASSERT(foundVersion.equals(maxIt->second));
+ }
+ // Make sure all shards are accounted for
+ ASSERT(foundMaxShardVersions.size() == maxShardVersions.size());
+ }
+};
+
+TEST_F(ChunkDiffUnitTest, Normal) {
+ runTest(false);
+}
+
+TEST_F(ChunkDiffUnitTest, Inverse) {
+ runTest(true);
}
-} // unnamed namespace
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/s/chunk_manager.cpp b/src/mongo/s/chunk_manager.cpp
index 65bd31a8628..5f578a27da4 100644
--- a/src/mongo/s/chunk_manager.cpp
+++ b/src/mongo/s/chunk_manager.cpp
@@ -73,7 +73,7 @@ namespace {
*
* The mongos adapter here tracks all shards, and stores ranges by (max, Chunk) in the map.
*/
-class CMConfigDiffTracker : public ConfigDiffTracker<shared_ptr<Chunk>, string> {
+class CMConfigDiffTracker : public ConfigDiffTracker<shared_ptr<Chunk>> {
public:
CMConfigDiffTracker(ChunkManager* manager) : _manager(manager) {}
@@ -254,7 +254,14 @@ bool ChunkManager::_load(ChunkMap& chunkMap,
differ.attach(_ns, chunkMap, _version, *shardVersions);
// Diff tracker should *always* find at least one chunk if collection exists
- int diffsApplied = differ.calculateConfigDiff(grid.catalogManager());
+ // Get the diff query required
+ auto diffQuery = differ.configDiffQuery();
+
+ std::vector<ChunkType> chunks;
+ uassertStatusOK(
+ grid.catalogManager()->getChunks(diffQuery.query, diffQuery.sort, boost::none, &chunks));
+
+ int diffsApplied = differ.calculateConfigDiff(chunks);
if (diffsApplied > 0) {
LOG(2) << "loaded " << diffsApplied << " chunks into new chunk manager for " << _ns
<< " with version " << _version;
diff --git a/src/mongo/s/d_merge.cpp b/src/mongo/s/d_merge.cpp
index 8f96a6fce3b..7c40db13bcf 100644
--- a/src/mongo/s/d_merge.cpp
+++ b/src/mongo/s/d_merge.cpp
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2013 10gen Inc.
+ * Copyright (C) 2013-2015 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
@@ -17,13 +17,13 @@
* code of portions of this program with the OpenSSL library under certain
* conditions as described in each individual source file and distribute
* linked combinations including the program with the OpenSSL library. You
- * must comply with the GNU Affero General Public License in all respects
- * for all of the code used other than as permitted herein. If you modify
- * file(s) with this exception, you may extend this exception to your
- * version of the file(s), but you are not obligated to do so. If you do not
- * wish to do so, delete this exception statement from your version. If you
- * delete this exception statement from all source files in the program,
- * then also delete it in the license file.
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
*/
#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kSharding
@@ -37,10 +37,11 @@
#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/operation_context.h"
+#include "mongo/db/s/collection_metadata.h"
#include "mongo/db/s/sharding_state.h"
#include "mongo/s/catalog/catalog_manager.h"
+#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/chunk.h"
-#include "mongo/s/collection_metadata.h"
#include "mongo/s/config.h"
#include "mongo/s/catalog/dist_lock_manager.h"
#include "mongo/s/grid.h"
diff --git a/src/mongo/s/d_migrate.cpp b/src/mongo/s/d_migrate.cpp
index 5b143d59b32..82f432b8925 100644
--- a/src/mongo/s/d_migrate.cpp
+++ b/src/mongo/s/d_migrate.cpp
@@ -64,6 +64,7 @@
#include "mongo/db/repl/replication_coordinator_global.h"
#include "mongo/db/service_context.h"
#include "mongo/db/storage/mmap_v1/dur.h"
+#include "mongo/db/s/collection_metadata.h"
#include "mongo/db/s/sharded_connection_info.h"
#include "mongo/db/s/sharding_state.h"
#include "mongo/db/write_concern.h"
@@ -1560,7 +1561,8 @@ public:
// if we have chunks left on the FROM shard, update the version of one of them as
// well. we can figure that out by grabbing the metadata installed on 5.a
- const CollectionMetadataPtr bumpedCollMetadata(shardingState.getCollectionMetadata(ns));
+ const std::shared_ptr<CollectionMetadata> bumpedCollMetadata(
+ shardingState.getCollectionMetadata(ns));
if (bumpedCollMetadata->getNumChunks() > 0) {
// get another chunk on that shard
ChunkType bumpChunk;
diff --git a/src/mongo/s/d_split.cpp b/src/mongo/s/d_split.cpp
index fba6f67f6f1..977fe575e55 100644
--- a/src/mongo/s/d_split.cpp
+++ b/src/mongo/s/d_split.cpp
@@ -1,32 +1,30 @@
-// @file d_split.cpp
-
/**
-* Copyright (C) 2008-2014 MongoDB Inc.
-*
-* This program is free software: you can redistribute it and/or modify
-* it under the terms of the GNU Affero General Public License, version 3,
-* as published by the Free Software Foundation.
-*
-* This program is distributed in the hope that it will be useful,
-* but WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-* GNU Affero General Public License for more details.
-*
-* You should have received a copy of the GNU Affero General Public License
-* along with this program. If not, see <http://www.gnu.org/licenses/>.
-*
-* As a special exception, the copyright holders give permission to link the
-* code of portions of this program with the OpenSSL library under certain
-* conditions as described in each individual source file and distribute
-* linked combinations including the program with the OpenSSL library. You
-* must comply with the GNU Affero General Public License in all respects
-* for all of the code used other than as permitted herein. If you modify
-* file(s) with this exception, you may extend this exception to your
-* version of the file(s), but you are not obligated to do so. If you do not
-* wish to do so, delete this exception statement from your version. If you
-* delete this exception statement from all source files in the program,
-* then also delete it in the license file.
-*/
+ * Copyright (C) 2008-2014 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kSharding
@@ -53,6 +51,7 @@
#include "mongo/db/instance.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/query/internal_plans.h"
+#include "mongo/db/s/collection_metadata.h"
#include "mongo/db/s/sharding_state.h"
#include "mongo/s/catalog/catalog_manager.h"
#include "mongo/s/catalog/type_chunk.h"
@@ -696,7 +695,8 @@ public:
}
// Get collection metadata
- const CollectionMetadataPtr collMetadata(shardingState.getCollectionMetadata(ns));
+ const std::shared_ptr<CollectionMetadata> collMetadata(
+ shardingState.getCollectionMetadata(ns));
// With nonzero shard version, we must have metadata
invariant(NULL != collMetadata);
diff --git a/src/mongo/s/d_state.cpp b/src/mongo/s/d_state.cpp
index e2c438a3b38..81422a68d7c 100644
--- a/src/mongo/s/d_state.cpp
+++ b/src/mongo/s/d_state.cpp
@@ -1,32 +1,30 @@
-// @file d_state.cpp
-
/**
-* Copyright (C) 2008 10gen Inc.
-*
-* This program is free software: you can redistribute it and/or modify
-* it under the terms of the GNU Affero General Public License, version 3,
-* as published by the Free Software Foundation.
-*
-* This program is distributed in the hope that it will be useful,
-* but WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-* GNU Affero General Public License for more details.
-*
-* You should have received a copy of the GNU Affero General Public License
-* along with this program. If not, see <http://www.gnu.org/licenses/>.
-*
-* As a special exception, the copyright holders give permission to link the
-* code of portions of this program with the OpenSSL library under certain
-* conditions as described in each individual source file and distribute
-* linked combinations including the program with the OpenSSL library. You
-* must comply with the GNU Affero General Public License in all respects
-* for all of the code used other than as permitted herein. If you modify
-* file(s) with this exception, you may extend this exception to your
-* version of the file(s), but you are not obligated to do so. If you do not
-* wish to do so, delete this exception statement from your version. If you
-* delete this exception statement from all source files in the program,
-* then also delete it in the license file.
-*/
+ * Copyright (C) 2008-2015 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects
+ * for all of the code used other than as permitted herein. If you modify
+ * file(s) with this exception, you may extend this exception to your
+ * version of the file(s), but you are not obligated to do so. If you do not
+ * wish to do so, delete this exception statement from your version. If you
+ * delete this exception statement from all source files in the program,
+ * then also delete it in the license file.
+ */
#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kSharding
@@ -49,10 +47,10 @@
#include "mongo/db/lasterror.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/repl/replication_coordinator_global.h"
+#include "mongo/db/s/collection_metadata.h"
#include "mongo/db/s/sharded_connection_info.h"
#include "mongo/db/s/sharding_state.h"
#include "mongo/db/wire_version.h"
-#include "mongo/s/collection_metadata.h"
#include "mongo/s/config.h"
#include "mongo/s/grid.h"
#include "mongo/s/stale_exception.h"
@@ -69,10 +67,6 @@ using std::string;
using std::stringstream;
using std::vector;
-bool isMongos() {
- return false;
-}
-
ShardForceVersionOkModeBlock::ShardForceVersionOkModeBlock(Client* client) {
info = ShardedConnectionInfo::get(client, false);
if (info)