-rw-r--r--  src/mongo/client/parallel.cpp                                 | 51
-rw-r--r--  src/mongo/db/ops/update_lifecycle_impl.cpp                    |  3
-rw-r--r--  src/mongo/db/ops/update_lifecycle_impl.h                      |  5
-rw-r--r--  src/mongo/s/catalog/legacy/catalog_manager_legacy.cpp         |  6
-rw-r--r--  src/mongo/s/catalog/replset/catalog_manager_replica_set.cpp   |  3
-rw-r--r--  src/mongo/s/chunk.cpp                                         |  3
-rw-r--r--  src/mongo/s/chunk_diff_test.cpp                               |  3
-rw-r--r--  src/mongo/s/config.cpp                                        |  2
-rw-r--r--  src/mongo/s/config.h                                          |  1
-rw-r--r--  src/mongo/s/cursors.h                                         |  8
-rw-r--r--  src/mongo/s/request.cpp                                       |  5
-rw-r--r--  src/mongo/s/strategy.cpp                                      |  1
12 files changed, 43 insertions(+), 48 deletions(-)
diff --git a/src/mongo/client/parallel.cpp b/src/mongo/client/parallel.cpp
index 897712926fa..4dc63472b90 100644
--- a/src/mongo/client/parallel.cpp
+++ b/src/mongo/client/parallel.cpp
@@ -34,7 +34,6 @@
 
 #include "mongo/client/parallel.h"
 
-
 #include "mongo/client/connpool.h"
 #include "mongo/client/constants.h"
 #include "mongo/client/dbclientcursor.h"
@@ -47,13 +46,11 @@
 #include "mongo/s/config.h"
 #include "mongo/s/grid.h"
 #include "mongo/s/stale_exception.h"
-#include "mongo/s/version_manager.h"
 #include "mongo/util/log.h"
 
 namespace mongo {
 
 using std::shared_ptr;
-using std::endl;
 using std::list;
 using std::map;
 using std::set;
@@ -393,9 +390,9 @@ void ParallelConnectionMetadata::cleanup(bool full) {
                 bool retry = false;
                 pcState->cursor->initLazyFinish(retry);
             } catch (std::exception&) {
-                warning() << "exception closing cursor" << endl;
+                warning() << "exception closing cursor";
             } catch (...) {
-                warning() << "unknown exception closing cursor" << endl;
+                warning() << "unknown exception closing cursor";
             }
         }
     }
@@ -598,7 +595,7 @@ void ParallelSortClusteredCursor::setupVersionAndHandleSlaveOk(PCStatePtr state,
            dassert(repl);
            warning() << "Primary for " << repl->getServerAddress()
                      << " was down before, bypassing setShardVersion."
-                     << " The local replica set view and targeting may be stale." << endl;
+                     << " The local replica set view and targeting may be stale.";
        }
    } else {
        try {
@@ -607,7 +604,7 @@ void ParallelSortClusteredCursor::setupVersionAndHandleSlaveOk(PCStatePtr state,
                // manager will be verified as compatible, or if the manager doesn't
                // exist, we don't care about version consistency
                LOG(pc) << "needed to set remote version on connection to value "
-                       << "compatible with " << vinfo << endl;
+                       << "compatible with " << vinfo;
            }
        } catch (const DBException&) {
            if (allowShardVersionFailure) {
@@ -620,7 +617,7 @@ void ParallelSortClusteredCursor::setupVersionAndHandleSlaveOk(PCStatePtr state,
                dassert(repl);
                warning() << "Cannot contact primary for " << repl->getServerAddress()
                          << " to check shard version."
-                         << " The local replica set view and targeting may be stale." << endl;
+                         << " The local replica set view and targeting may be stale.";
            }
        } else {
            throw;
@@ -644,7 +641,7 @@ void ParallelSortClusteredCursor::startInit() {
            prefix = "creating";
        }
    }
-   LOG(pc) << prefix << " pcursor over " << _qSpec << " and " << _cInfo << endl;
+   LOG(pc) << prefix << " pcursor over " << _qSpec << " and " << _cInfo;
 
    set<ShardId> shardIds;
    string vinfo;
@@ -680,7 +677,7 @@ void ParallelSortClusteredCursor::startInit() {
             ++i) {
            if (shardIds.find(i->first) == shardIds.end()) {
                LOG(pc) << "closing cursor on shard " << i->first
-                       << " as the connection is no longer required by " << vinfo << endl;
+                       << " as the connection is no longer required by " << vinfo;
 
                i->second.cleanup(true);
            }
@@ -696,7 +693,7 @@ void ParallelSortClusteredCursor::startInit() {
        PCMData& mdata = _cursorMap[shardId];
 
        LOG(pc) << "initializing on shard " << shardId << ", current connection state is "
-               << mdata.toBSON() << endl;
+               << mdata.toBSON();
 
        // This may be the first time connecting to this shard, if so we can get an error here
        try {
@@ -709,11 +706,11 @@ void ParallelSortClusteredCursor::startInit() {
                    bool compatibleManager = true;
 
                    if (primary && !state->primary)
-                       warning() << "Collection becoming unsharded detected" << endl;
+                       warning() << "Collection becoming unsharded detected";
                    if (manager && !state->manager)
-                       warning() << "Collection becoming sharded detected" << endl;
+                       warning() << "Collection becoming sharded detected";
                    if (primary && state->primary && primary != state->primary)
-                       warning() << "Weird shift of primary detected" << endl;
+                       warning() << "Weird shift of primary detected";
 
                    compatiblePrimary = primary && state->primary && primary == state->primary;
                    compatibleManager =
@@ -829,7 +826,7 @@ void ParallelSortClusteredCursor::startInit() {
 
        LOG(pc) << "initialized " << (isCommand() ? "command " : "query ")
                << (lazyInit ? "(lazily) " : "(full) ") << "on shard " << shardId
-               << ", current connection state is " << mdata.toBSON() << endl;
+               << ", current connection state is " << mdata.toBSON();
    } catch (StaleConfigException& e) {
        // Our version isn't compatible with the current version anymore on at least one shard,
        // need to retry immediately
@@ -846,12 +843,12 @@ void ParallelSortClusteredCursor::startInit() {
        int logLevel = fullReload ? 0 : 1;
        LOG(pc + logLevel) << "stale config of ns " << staleNS
                           << " during initialization, will retry with forced : " << forceReload
-                          << ", full : " << fullReload << causedBy(e) << endl;
+                          << ", full : " << fullReload << causedBy(e);
 
        // This is somewhat strange
        if (staleNS != nss)
            warning() << "versioned ns " << nss.ns() << " doesn't match stale config namespace "
-                     << staleNS << endl;
+                     << staleNS;
 
        _handleStaleNS(staleNS, forceReload, fullReload);
 
@@ -935,7 +932,7 @@ void ParallelSortClusteredCursor::finishInit() {
    bool retry = false;
    map<string, StaleConfigException> staleNSExceptions;
 
-   LOG(pc) << "finishing over " << _cursorMap.size() << " shards" << endl;
+   LOG(pc) << "finishing over " << _cursorMap.size() << " shards";
 
    for (map<ShardId, PCMData>::iterator i = _cursorMap.begin(), end = _cursorMap.end(); i != end;
         ++i) {
@@ -943,7 +940,7 @@ void ParallelSortClusteredCursor::finishInit() {
        PCMData& mdata = i->second;
 
        LOG(pc) << "finishing on shard " << shardId << ", current connection state is "
-               << mdata.toBSON() << endl;
+               << mdata.toBSON();
 
        // Ignore empty conns for now
        if (!mdata.pcState)
@@ -991,7 +988,7 @@ void ParallelSortClusteredCursor::finishInit() {
                state->cursor->attach(state->conn.get());  // Closes connection for us
 
                LOG(pc) << "finished on shard " << shardId << ", current connection state is "
-                       << mdata.toBSON() << endl;
+                       << mdata.toBSON();
            }
        } catch (RecvStaleConfigException& e) {
            retry = true;
@@ -1066,12 +1063,12 @@ void ParallelSortClusteredCursor::finishInit() {
 
            LOG(pc + logLevel) << "stale config of ns " << staleNS
                               << " on finishing query, will retry with forced : " << forceReload
-                              << ", full : " << fullReload << causedBy(exception) << endl;
+                              << ", full : " << fullReload << causedBy(exception);
 
            // This is somewhat strange
            if (staleNS != ns)
                warning() << "versioned ns " << ns << " doesn't match stale config namespace "
-                         << staleNS << endl;
+                         << staleNS;
 
            _handleStaleNS(staleNS, forceReload, fullReload);
        }
@@ -1090,7 +1087,7 @@ void ParallelSortClusteredCursor::finishInit() {
 
        // Erase empty stuff
        if (!mdata.pcState) {
-           log() << "PCursor erasing empty state " << mdata.toBSON() << endl;
+           log() << "PCursor erasing empty state " << mdata.toBSON();
            _cursorMap.erase(i++);
            continue;
        } else
@@ -1211,7 +1208,7 @@ void ParallelSortClusteredCursor::_oldInit() {
             ++it) {
            log() << serverHosts[*it] << ", ";
        }
-       log() << finishedQueries << " finished queries." << endl;
+       log() << finishedQueries << " finished queries.";
    }
 
    size_t num = 0;
@@ -1314,7 +1311,7 @@ void ParallelSortClusteredCursor::_oldInit() {
            try {
                if (!_cursors[i].get()->initLazyFinish(retry)) {
                    warning() << "invalid result from " << conns[i]->getHost()
-                             << (retry ? ", retrying" : "") << endl;
+                             << (retry ? ", retrying" : "");
                    _cursors[i].reset(NULL, NULL);
 
                    if (!retry) {
@@ -1414,12 +1411,12 @@ void ParallelSortClusteredCursor::_oldInit() {
        } else if (throwException) {
            throw DBException(errMsg.str(), 14827);
        } else {
-           warning() << errMsg.str() << endl;
+           warning() << errMsg.str();
        }
    }
 
    if (retries > 0)
-       log() << "successfully finished parallel query after " << retries << " retries" << endl;
+       log() << "successfully finished parallel query after " << retries << " retries";
 }
 
 ParallelSortClusteredCursor::~ParallelSortClusteredCursor() {
diff --git a/src/mongo/db/ops/update_lifecycle_impl.cpp b/src/mongo/db/ops/update_lifecycle_impl.cpp
index ff8d76bc71d..511f1b59244 100644
--- a/src/mongo/db/ops/update_lifecycle_impl.cpp
+++ b/src/mongo/db/ops/update_lifecycle_impl.cpp
@@ -36,7 +36,6 @@
 #include "mongo/db/catalog/collection.h"
 #include "mongo/db/s/collection_metadata.h"
 #include "mongo/db/s/sharding_state.h"
-#include "mongo/s/chunk_version.h"
 
 namespace mongo {
 
@@ -74,7 +73,7 @@ const UpdateIndexData* UpdateLifecycleImpl::getIndexKeys(OperationContext* opCtx
 }
 
 const std::vector<FieldRef*>* UpdateLifecycleImpl::getImmutableFields() const {
-    CollectionMetadataPtr metadata = getMetadata(_nsString);
+    std::shared_ptr<CollectionMetadata> metadata = getMetadata(_nsString);
     if (metadata) {
        const std::vector<FieldRef*>& fields = metadata->getKeyPatternFields();
        // Return shard-keys as immutable for the update system.
diff --git a/src/mongo/db/ops/update_lifecycle_impl.h b/src/mongo/db/ops/update_lifecycle_impl.h
index d7e5616f4eb..da67a8f2f9d 100644
--- a/src/mongo/db/ops/update_lifecycle_impl.h
+++ b/src/mongo/db/ops/update_lifecycle_impl.h
@@ -58,9 +58,10 @@ public:
    virtual const std::vector<FieldRef*>* getImmutableFields() const;
 
 private:
-   Collection* _collection;
    const NamespaceString& _nsString;
-   ChunkVersion _shardVersion;
+   const ChunkVersion _shardVersion;
+
+   Collection* _collection;
 };
 
 } /* namespace mongo */
diff --git a/src/mongo/s/catalog/legacy/catalog_manager_legacy.cpp b/src/mongo/s/catalog/legacy/catalog_manager_legacy.cpp
index 1f0f9e324a5..4996af0c543 100644
--- a/src/mongo/s/catalog/legacy/catalog_manager_legacy.cpp
+++ b/src/mongo/s/catalog/legacy/catalog_manager_legacy.cpp
@@ -32,7 +32,6 @@
 
 #include "mongo/s/catalog/legacy/catalog_manager_legacy.h"
 
-#include <map>
 #include <pcrecpp.h>
 
 #include "mongo/base/status.h"
@@ -80,11 +79,10 @@
 
 namespace mongo {
 
-using std::map;
-using std::pair;
 using std::set;
 using std::shared_ptr;
 using std::string;
+using std::unique_ptr;
 using std::vector;
 
 using str::stream;
@@ -333,7 +331,7 @@ Status CatalogManagerLegacy::shardCollection(OperationContext* txn,
    logChange(
        txn->getClient()->clientAddress(true), "shardCollection.start", ns, collectionDetail.obj());
 
-   ChunkManagerPtr manager(new ChunkManager(ns, fieldsAndOrder, unique));
+   shared_ptr<ChunkManager> manager(new ChunkManager(ns, fieldsAndOrder, unique));
    manager->createFirstChunks(dbPrimaryShardId, &initPoints, &initShardIds);
    manager->loadExistingRanges(nullptr);
 
diff --git a/src/mongo/s/catalog/replset/catalog_manager_replica_set.cpp b/src/mongo/s/catalog/replset/catalog_manager_replica_set.cpp
index ef7fb83469f..99b797b0336 100644
--- a/src/mongo/s/catalog/replset/catalog_manager_replica_set.cpp
+++ b/src/mongo/s/catalog/replset/catalog_manager_replica_set.cpp
@@ -75,6 +75,7 @@
 namespace mongo {
 
 using std::set;
+using std::shared_ptr;
 using std::string;
 using std::unique_ptr;
 using std::vector;
@@ -201,7 +202,7 @@ Status CatalogManagerReplicaSet::shardCollection(OperationContext* txn,
                  collectionDetail.obj());
    }
 
-   ChunkManagerPtr manager(new ChunkManager(ns, fieldsAndOrder, unique));
+   shared_ptr<ChunkManager> manager(new ChunkManager(ns, fieldsAndOrder, unique));
    manager->createFirstChunks(dbPrimaryShardId, &initPoints, &initShardIds);
    manager->loadExistingRanges(nullptr);
 
diff --git a/src/mongo/s/chunk.cpp b/src/mongo/s/chunk.cpp
index 74ebabba210..e49d271c145 100644
--- a/src/mongo/s/chunk.cpp
+++ b/src/mongo/s/chunk.cpp
@@ -48,8 +48,8 @@
 #include "mongo/s/catalog/type_settings.h"
 #include "mongo/s/chunk_manager.h"
 #include "mongo/s/client/shard_registry.h"
+#include "mongo/s/client/shard_connection.h"
 #include "mongo/s/config.h"
-#include "mongo/s/cursors.h"
 #include "mongo/s/grid.h"
 #include "mongo/s/shard_key_pattern.h"
 #include "mongo/util/log.h"
@@ -542,6 +542,7 @@ bool Chunk::splitIfShould(long dataWritten) const {
            LOG(1) << "won't auto split because not enough tickets: " << getManager()->getns();
            return false;
        }
+
        TicketHolderReleaser releaser(&(getManager()->_splitHeuristics._splitTickets));
 
        // this is a bit ugly
diff --git a/src/mongo/s/chunk_diff_test.cpp b/src/mongo/s/chunk_diff_test.cpp
index b6ecd3f84f4..f7c924eb901 100644
--- a/src/mongo/s/chunk_diff_test.cpp
+++ b/src/mongo/s/chunk_diff_test.cpp
@@ -28,8 +28,6 @@
 
 #include "mongo/platform/basic.h"
 
-#include "mongo/s/chunk_diff.h"
-
 #include <string>
 #include <map>
 #include <utility>
@@ -37,6 +35,7 @@
 #include "mongo/db/jsobj.h"
 #include "mongo/platform/random.h"
 #include "mongo/s/catalog/type_chunk.h"
+#include "mongo/s/chunk_diff.h"
 #include "mongo/unittest/unittest.h"
 
 namespace mongo {
diff --git a/src/mongo/s/config.cpp b/src/mongo/s/config.cpp
index 48f3e1092dd..557315fd31a 100644
--- a/src/mongo/s/config.cpp
+++ b/src/mongo/s/config.cpp
@@ -141,6 +141,8 @@ DBConfig::DBConfig(std::string name, const DatabaseType& dbt) : _name(name) {
    _shardingEnabled = dbt.getSharded();
 }
 
+DBConfig::~DBConfig() = default;
+
 bool DBConfig::isSharded(const string& ns) {
    if (!_shardingEnabled)
        return false;
diff --git a/src/mongo/s/config.h b/src/mongo/s/config.h
index 7c4f1b7440a..a9ee646f138 100644
--- a/src/mongo/s/config.h
+++ b/src/mongo/s/config.h
@@ -98,6 +98,7 @@ private:
 class DBConfig {
 public:
    DBConfig(std::string name, const DatabaseType& dbt);
+   ~DBConfig();
 
    /**
     * The name of the database which this entry caches.
diff --git a/src/mongo/s/cursors.h b/src/mongo/s/cursors.h
index 0804830da1e..eb6aae735c9 100644
--- a/src/mongo/s/cursors.h
+++ b/src/mongo/s/cursors.h
@@ -1,5 +1,4 @@
-// cursors.h
-/*
+/**
 *    Copyright (C) 2010 10gen Inc.
 *
 *    This program is free software: you can redistribute it and/or modify
@@ -27,7 +26,6 @@
 *    then also delete it in the license file.
 */
-
 
 #pragma once
 
 #include <string>
@@ -40,7 +38,6 @@
 namespace mongo {
 
 class QueryMessage;
 
-
 class ShardedClientCursor {
    MONGO_DISALLOW_COPYING(ShardedClientCursor);
@@ -154,4 +151,5 @@ private:
 };
 
 extern CursorCache cursorCache;
-}
+
+}  // namespace mongo
diff --git a/src/mongo/s/request.cpp b/src/mongo/s/request.cpp
index 2c330a80113..4e5b5626730 100644
--- a/src/mongo/s/request.cpp
+++ b/src/mongo/s/request.cpp
@@ -48,7 +48,6 @@
 
 namespace mongo {
 
-using std::endl;
 using std::string;
 
 Request::Request(Message& m, AbstractMessagingPort* p)
@@ -90,7 +89,7 @@ void Request::process(int attempt) {
 
    Timer t;
    LOG(3) << "Request::process begin ns: " << getns() << " msg id: " << msgId << " op: " << op
-          << " attempt: " << attempt << endl;
+          << " attempt: " << attempt;
 
    _d.markSet();
@@ -122,7 +121,7 @@ void Request::process(int attempt) {
    }
 
    LOG(3) << "Request::process end ns: " << getns() << " msg id: " << msgId << " op: " << op
-          << " attempt: " << attempt << " " << t.millis() << "ms" << endl;
+          << " attempt: " << attempt << " " << t.millis() << "ms";
 }
 
 void Request::reply(Message& response, const string& fromServer) {
diff --git a/src/mongo/s/strategy.cpp b/src/mongo/s/strategy.cpp
index 0bb53cabaaf..fb7c3e1c154 100644
--- a/src/mongo/s/strategy.cpp
+++ b/src/mongo/s/strategy.cpp
@@ -32,7 +32,6 @@
 
 #include "mongo/s/strategy.h"
 
-
 #include "mongo/base/status.h"
 #include "mongo/base/owned_pointer_vector.h"
 #include "mongo/bson/util/builder.h"
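
Note on the recurring "<< endl" removals in parallel.cpp and request.cpp above: the change
relies on the logging stream terminating each message by itself, which makes the explicit
endl redundant. The following is a minimal toy sketch (hypothetical LogStream class and
usage, not MongoDB's actual logger implementation) of a stream that appends the newline
when the temporary is destroyed at the end of the statement:

    #include <iostream>
    #include <sstream>

    // Toy logging stream: buffers everything streamed into it and emits one
    // complete line, newline included, when it goes out of scope at the end
    // of the full expression.
    class LogStream {
    public:
        ~LogStream() { std::cout << _buf.str() << '\n'; }  // newline added here

        template <typename T>
        LogStream& operator<<(const T& value) {
            _buf << value;
            return *this;
        }

    private:
        std::ostringstream _buf;
    };

    int main() {
        // Mirrors the post-change style: no explicit "<< endl" is needed.
        LogStream() << "exception closing cursor";
        LogStream() << "finishing over " << 3 << " shards";
        return 0;
    }

Under that assumption, the dropped "<< endl" was only a redundant line terminator, so the
visible log output should be unchanged.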
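
Note on the config.h / config.cpp hunks above: DBConfig gains a destructor that is declared
in the header and defined (as "= default") in the .cpp. A common reason for that pattern is
a smart-pointer member whose pointee type is only forward-declared in the header; the
destructor must then be generated where the type is complete. The diff itself does not show
which DBConfig member motivates it, so the sketch below uses hypothetical Widget/WidgetOwner
names and std::unique_ptr purely as an illustration:

    #include <memory>

    // --- conceptually the header: only a forward declaration is visible ---
    class Widget;  // definition intentionally not visible here

    class WidgetOwner {
    public:
        WidgetOwner();
        ~WidgetOwner();  // declared here, defined out of line (like ~DBConfig())

    private:
        std::unique_ptr<Widget> _widget;  // ok to hold with an incomplete type
    };

    // --- conceptually the .cpp: Widget is complete from here on ---
    class Widget {
    public:
        int value = 0;
    };

    WidgetOwner::WidgetOwner() = default;
    WidgetOwner::~WidgetOwner() = default;  // unique_ptr<Widget> is destroyed here

    int main() {
        WidgetOwner owner;  // fine: ~WidgetOwner() lives where Widget is complete
        return 0;
    }

Had the destructor been left implicit (and therefore defined inline wherever the header is
used), destroying the std::unique_ptr<Widget> member would require Widget to be complete at
every point a WidgetOwner is destroyed, which a forward declaration alone does not provide.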