author    Randolph Tan <randolph@10gen.com>  2016-01-13 15:18:36 -0500
committer Randolph Tan <randolph@10gen.com>  2016-01-15 12:03:19 -0500
commit    65f2da2c49c2b22d2b80e6562b9b61f242cb9a18 (patch)
tree      aef7afabf45ee7bf427f0a3097442ede23e30303 /src/mongo
parent    200b1cd8caf4b617e98a486cfbf17d00108bf881 (diff)
SERVER-22114 Remove unnecessary refresh in ChunkManagerTargeter::refreshNow
In addition, limit the lifetime of ChunkManagerTargeter instances so their references to the ChunkManager can be released earlier.
Diffstat (limited to 'src/mongo')
-rw-r--r--  src/mongo/s/chunk_manager_targeter.cpp            12
-rw-r--r--  src/mongo/s/chunk_manager_targeter.h              11
-rw-r--r--  src/mongo/s/cluster_write.cpp                     60
-rw-r--r--  src/mongo/s/cluster_write.h                       21
-rw-r--r--  src/mongo/s/commands/cluster_write_cmd.cpp         9
-rw-r--r--  src/mongo/s/write_ops/batch_write_exec.cpp        26
-rw-r--r--  src/mongo/s/write_ops/batch_write_exec.h          10
-rw-r--r--  src/mongo/s/write_ops/batch_write_exec_test.cpp   23
8 files changed, 60 insertions(+), 112 deletions(-)
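
At its core this is an ownership change: BatchWriteExec and ChunkManagerTargeter stop owning their stats objects, and the caller allocates the stats and passes a pointer down. A minimal sketch of the new shape, using stand-in types rather than the real MongoDB classes:

    #include <iostream>

    struct BatchWriteStats {
        int numRounds = 0;
    };

    class BatchExecutor {
    public:
        // After the change: the executor only borrows the stats object,
        // so no getStats()/releaseStats() ownership transfer is needed.
        void executeBatch(BatchWriteStats* stats) {
            ++stats->numRounds;  // record progress into caller-owned stats
        }
    };

    int main() {
        BatchWriteStats stats;      // owned by the caller, on the stack
        BatchExecutor exec;
        exec.executeBatch(&stats);  // executor writes through the borrow
        std::cout << stats.numRounds << "\n";  // prints 1
    }

The per-file hunks below apply this pattern to the targeter, the executor, and their callers.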
diff --git a/src/mongo/s/chunk_manager_targeter.cpp b/src/mongo/s/chunk_manager_targeter.cpp
index 2d8b75d7567..d9dfbe7ec48 100644
--- a/src/mongo/s/chunk_manager_targeter.cpp
+++ b/src/mongo/s/chunk_manager_targeter.cpp
@@ -263,8 +263,9 @@ bool wasMetadataRefreshed(const ChunkManagerPtr& managerA,
} // namespace
-ChunkManagerTargeter::ChunkManagerTargeter(const NamespaceString& nss)
- : _nss(nss), _needsTargetingRefresh(false) {}
+ChunkManagerTargeter::ChunkManagerTargeter(const NamespaceString& nss, TargeterStats* stats)
+ : _nss(nss), _needsTargetingRefresh(false), _stats(stats) {}
+
Status ChunkManagerTargeter::init(OperationContext* txn) {
auto status = grid.implicitCreateDb(txn, _nss.db().toString());
@@ -515,7 +516,7 @@ Status ChunkManagerTargeter::targetShardKey(OperationContext* txn,
// Track autosplit stats for sharded collections
// Note: this is only best effort accounting and is not accurate.
if (estDataSize > 0) {
- _stats.chunkSizeDelta[chunk->getMin()] += estDataSize;
+ _stats->chunkSizeDelta[chunk->getMin()] += estDataSize;
}
*endpoint = new ShardEndpoint(chunk->getShardId(), _manager->getVersion(chunk->getShardId()));
@@ -599,10 +600,6 @@ void ChunkManagerTargeter::noteCouldNotTarget() {
_needsTargetingRefresh = true;
}
-const TargeterStats* ChunkManagerTargeter::getStats() const {
- return &_stats;
-}
-
Status ChunkManagerTargeter::refreshIfNeeded(OperationContext* txn, bool* wasChanged) {
bool dummy;
if (!wasChanged) {
@@ -712,7 +709,6 @@ Status ChunkManagerTargeter::refreshNow(OperationContext* txn, RefreshType refre
// Dumps the db info, reloads it all, synchronization between threads happens
// internally.
config->reload(txn);
- config->getChunkManagerIfExists(txn, _nss.ns(), true, true);
} catch (const DBException& ex) {
return Status(ErrorCodes::UnknownError, ex.toString());
}
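
For the headline fix itself: config->reload(txn) already repopulates the routing information, so the forced getChunkManagerIfExists call that followed it repeated the same work. A conceptual sketch of the redundancy, with a stand-in cache type (not the real DBConfig API):

    #include <iostream>

    // Stand-in routing cache: reload() already refreshes the data, so a
    // forced fetch immediately afterwards repeats the same remote work.
    class RoutingCache {
    public:
        void reload() { _version = fetchFromConfigServers(); }
        int get(bool forceRefresh) {
            if (forceRefresh)
                _version = fetchFromConfigServers();  // duplicate fetch
            return _version;
        }
    private:
        int fetchFromConfigServers() { return ++_fetches; }
        int _version = 0;
        int _fetches = 0;
    };

    int main() {
        RoutingCache cache;
        cache.reload();                         // one fetch
        std::cout << cache.get(false) << "\n";  // reuses it: still 1 fetch
    }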
diff --git a/src/mongo/s/chunk_manager_targeter.h b/src/mongo/s/chunk_manager_targeter.h
index 900928d8841..b9fa547d89e 100644
--- a/src/mongo/s/chunk_manager_targeter.h
+++ b/src/mongo/s/chunk_manager_targeter.h
@@ -55,7 +55,7 @@ struct TargeterStats {
*/
class ChunkManagerTargeter : public NSTargeter {
public:
- ChunkManagerTargeter(const NamespaceString& nss);
+ ChunkManagerTargeter(const NamespaceString& nss, TargeterStats* stats);
/**
* Initializes the ChunkManagerTargeter with the latest targeting information for the
@@ -99,11 +99,6 @@ public:
*/
Status refreshIfNeeded(OperationContext* txn, bool* wasChanged);
- /**
- * Returns the stats. Note that the returned stats object is still owned by this targeter.
- */
- const TargeterStats* getStats() const;
-
private:
// Different ways we can refresh metadata
enum RefreshType {
@@ -158,8 +153,8 @@ private:
// Stores whether we need to check the remote server on refresh
bool _needsTargetingRefresh;
- // Represents only the view and not really part of the targeter state.
- mutable TargeterStats _stats;
+ // Represents only the view and not really part of the targeter state. This is not owned here.
+ TargeterStats* _stats;
// Zero or one of these are filled at all times
// If sharded, _manager, if unsharded, _primary, on error, neither
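
The header now holds _stats as a raw, non-owning pointer, with the comment documenting that the caller guarantees the lifetime. A small sketch of this borrow pattern, again with stand-in types:

    #include <iostream>
    #include <map>

    struct TargeterStats {
        std::map<int, long long> chunkSizeDelta;  // keyed by chunk min (simplified)
    };

    // The caller guarantees the stats object outlives the targeter,
    // so a raw pointer is sufficient and ownership stays outside.
    class Targeter {
    public:
        explicit Targeter(TargeterStats* stats) : _stats(stats) {}
        void noteWrite(int chunkMin, long long estDataSize) {
            _stats->chunkSizeDelta[chunkMin] += estDataSize;
        }
    private:
        TargeterStats* _stats;  // not owned here
    };

    int main() {
        TargeterStats stats;  // outlives the targeter below
        Targeter(&stats).noteWrite(0, 1024);
        std::cout << stats.chunkSizeDelta[0] << "\n";  // prints 1024
    }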
diff --git a/src/mongo/s/cluster_write.cpp b/src/mongo/s/cluster_write.cpp
index 25c4372dfef..f782d8cbda1 100644
--- a/src/mongo/s/cluster_write.cpp
+++ b/src/mongo/s/cluster_write.cpp
@@ -256,50 +256,40 @@ void ClusterWriter::write(OperationContext* txn,
grid.catalogManager(txn)->writeConfigServerDirect(txn, *request, response);
} else {
- ChunkManagerTargeter targeter(request->getTargetingNSS());
-
- Status targetInitStatus = targeter.init(txn);
- if (!targetInitStatus.isOK()) {
- toBatchError(Status(targetInitStatus.code(),
- str::stream() << "unable to target"
- << (request->isInsertIndexRequest() ? " index" : "")
- << " write op for collection "
- << request->getTargetingNS()
- << causedBy(targetInitStatus)),
- response);
- return;
- }
+ TargeterStats targeterStats;
+
+ {
+ ChunkManagerTargeter targeter(request->getTargetingNSS(), &targeterStats);
+
+ Status targetInitStatus = targeter.init(txn);
+ if (!targetInitStatus.isOK()) {
+ toBatchError(Status(targetInitStatus.code(),
+ str::stream()
+ << "unable to target"
+ << (request->isInsertIndexRequest() ? " index" : "")
+ << " write op for collection " << request->getTargetingNS()
+ << causedBy(targetInitStatus)),
+ response);
+ return;
+ }
- DBClientShardResolver resolver;
- DBClientMultiCommand dispatcher;
- BatchWriteExec exec(&targeter, &resolver, &dispatcher);
- exec.executeBatch(txn, *request, response);
+ DBClientShardResolver resolver;
+ DBClientMultiCommand dispatcher;
+ BatchWriteExec exec(&targeter, &resolver, &dispatcher);
+ exec.executeBatch(txn, *request, response, &_stats);
+ }
if (_autoSplit) {
- splitIfNeeded(txn, request->getNS(), *targeter.getStats());
+ splitIfNeeded(txn, request->getNS(), targeterStats);
}
-
- _stats->setShardStats(exec.releaseStats());
}
}
ClusterWriter::ClusterWriter(bool autoSplit, int timeoutMillis)
- : _autoSplit(autoSplit), _timeoutMillis(timeoutMillis), _stats(new ClusterWriterStats) {}
-
-const ClusterWriterStats& ClusterWriter::getStats() {
- return *_stats;
-}
-
-void ClusterWriterStats::setShardStats(BatchWriteExecStats* shardStats) {
- _shardStats.reset(shardStats);
-}
-
-bool ClusterWriterStats::hasShardStats() const {
- return NULL != _shardStats.get();
-}
+ : _autoSplit(autoSplit), _timeoutMillis(timeoutMillis) {}
-const BatchWriteExecStats& ClusterWriterStats::getShardStats() const {
- return *_shardStats;
+const BatchWriteExecStats& ClusterWriter::getStats() {
+ return _stats;
}
} // namespace mongo
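
The new block scope around the targeter is what the commit message means by limiting lifetimes: the targeter, and with it its shared reference to the ChunkManager, is destroyed before splitIfNeeded runs. A stand-in demonstration of the effect using shared_ptr reference counts:

    #include <iostream>
    #include <memory>

    struct ChunkRoutingInfo {};  // stand-in for ChunkManager

    int main() {
        auto manager = std::make_shared<ChunkRoutingInfo>();
        {
            // A targeter-like borrower holds a shared reference...
            std::shared_ptr<ChunkRoutingInfo> targeterRef = manager;
            std::cout << manager.use_count() << "\n";  // 2 inside the block
        }   // ...which is released here, at the end of the block
        std::cout << manager.use_count() << "\n";      // 1 before split work runs
    }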
diff --git a/src/mongo/s/cluster_write.h b/src/mongo/s/cluster_write.h
index aca55c5cae8..0af9615a47c 100644
--- a/src/mongo/s/cluster_write.h
+++ b/src/mongo/s/cluster_write.h
@@ -32,10 +32,8 @@
namespace mongo {
-class ClusterWriterStats;
class BatchedCommandRequest;
class BatchedCommandResponse;
-class BatchWriteExecStats;
class OperationContext;
class ClusterWriter {
@@ -46,28 +44,13 @@ public:
const BatchedCommandRequest& request,
BatchedCommandResponse* response);
- const ClusterWriterStats& getStats();
+ const BatchWriteExecStats& getStats();
private:
const bool _autoSplit;
const int _timeoutMillis;
- std::unique_ptr<ClusterWriterStats> _stats;
-};
-
-class ClusterWriterStats {
-public:
- // Transfers ownership to the cluster write stats
- void setShardStats(BatchWriteExecStats* _shardStats);
-
- bool hasShardStats() const;
-
- const BatchWriteExecStats& getShardStats() const;
-
- // TODO: When we have ConfigCoordinator stats, put these here too.
-
-private:
- std::unique_ptr<BatchWriteExecStats> _shardStats;
+ BatchWriteExecStats _stats;
};
/**
diff --git a/src/mongo/s/commands/cluster_write_cmd.cpp b/src/mongo/s/commands/cluster_write_cmd.cpp
index a14549e0455..c7055229889 100644
--- a/src/mongo/s/commands/cluster_write_cmd.cpp
+++ b/src/mongo/s/commands/cluster_write_cmd.cpp
@@ -193,9 +193,8 @@ public:
}
// Save the last opTimes written on each shard for this client, to allow GLE to work
- if (haveClient() && writer.getStats().hasShardStats()) {
- ClusterLastErrorInfo::get(cc())
- .addHostOpTimes(writer.getStats().getShardStats().getWriteOpTimes());
+ if (haveClient()) {
+ ClusterLastErrorInfo::get(cc()).addHostOpTimes(writer.getStats().getWriteOpTimes());
}
// TODO
@@ -232,9 +231,9 @@ private:
std::vector<Strategy::CommandResult>* results) {
// Note that this implementation will not handle targeting retries and does not completely
// emulate write behavior
-
+ TargeterStats stats;
ChunkManagerTargeter targeter(
- NamespaceString(targetingBatchItem.getRequest()->getTargetingNS()));
+ NamespaceString(targetingBatchItem.getRequest()->getTargetingNS()), &stats);
Status status = targeter.init(txn);
if (!status.isOK())
return status;
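
A knock-on simplification: because ClusterWriter now holds BatchWriteExecStats by value instead of behind a unique_ptr, the stats always exist and the hasShardStats() null check above disappears. A sketch of the value-member pattern, with stand-in types:

    #include <iostream>

    struct ExecStats { int numRounds = 0; };

    // Before: a std::unique_ptr<ExecStats> member forced callers to
    // check hasStats() first. After: a plain value member is never null.
    class Writer {
    public:
        const ExecStats& getStats() const { return _stats; }
    private:
        ExecStats _stats;  // value member: always present
    };

    int main() {
        Writer w;
        std::cout << w.getStats().numRounds << "\n";  // safe without a null check
    }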
diff --git a/src/mongo/s/write_ops/batch_write_exec.cpp b/src/mongo/s/write_ops/batch_write_exec.cpp
index 99d4292adf9..d9ce669e3f7 100644
--- a/src/mongo/s/write_ops/batch_write_exec.cpp
+++ b/src/mongo/s/write_ops/batch_write_exec.cpp
@@ -51,10 +51,7 @@ using std::vector;
BatchWriteExec::BatchWriteExec(NSTargeter* targeter,
ShardResolver* resolver,
MultiCommandDispatch* dispatcher)
- : _targeter(targeter),
- _resolver(resolver),
- _dispatcher(dispatcher),
- _stats(new BatchWriteExecStats) {}
+ : _targeter(targeter), _resolver(resolver), _dispatcher(dispatcher) {}
namespace {
@@ -94,7 +91,8 @@ static const int kMaxRoundsWithoutProgress(5);
void BatchWriteExec::executeBatch(OperationContext* txn,
const BatchedCommandRequest& clientRequest,
- BatchedCommandResponse* clientResponse) {
+ BatchedCommandResponse* clientResponse,
+ BatchWriteExecStats* stats) {
LOG(4) << "starting execution of write batch of size "
<< static_cast<int>(clientRequest.sizeWriteOps()) << " for " << clientRequest.getNS();
@@ -144,7 +142,7 @@ void BatchWriteExec::executeBatch(OperationContext* txn,
// Don't do anything until a targeter refresh
_targeter->noteCouldNotTarget();
refreshedTargeter = true;
- ++_stats->numTargetErrors;
+ ++stats->numTargetErrors;
dassert(childBatches.size() == 0u);
}
@@ -182,7 +180,7 @@ void BatchWriteExec::executeBatch(OperationContext* txn,
Status resolveStatus =
_resolver->chooseWriteHost(txn, nextBatch->getEndpoint().shardName, &shardHost);
if (!resolveStatus.isOK()) {
- ++_stats->numResolveErrors;
+ ++stats->numResolveErrors;
// Record a resolve failure
// TODO: It may be necessary to refresh the cache if stale, or maybe just
@@ -269,7 +267,7 @@ void BatchWriteExec::executeBatch(OperationContext* txn,
if (staleErrors.size() > 0) {
noteStaleResponses(staleErrors, _targeter);
- ++_stats->numStaleBatches;
+ ++stats->numStaleBatches;
}
// Remember if the shard is actively changing metadata right now
@@ -280,7 +278,7 @@ void BatchWriteExec::executeBatch(OperationContext* txn,
// Remember that we successfully wrote to this shard
// NOTE: This will record lastOps for shards where we actually didn't update
// or delete any documents, which preserves old behavior but is conservative
- _stats->noteWriteAt(
+ stats->noteWriteAt(
shardHost,
response.isLastOpSet() ? response.getLastOp() : repl::OpTime(),
response.isElectionIdSet() ? response.getElectionId() : OID());
@@ -303,7 +301,7 @@ void BatchWriteExec::executeBatch(OperationContext* txn,
}
++rounds;
- ++_stats->numRounds;
+ ++stats->numRounds;
// If we're done, get out
if (batchOp.isFinished())
@@ -360,14 +358,6 @@ void BatchWriteExec::executeBatch(OperationContext* txn,
<< " for " << clientRequest.getNS();
}
-const BatchWriteExecStats& BatchWriteExec::getStats() {
- return *_stats;
-}
-
-BatchWriteExecStats* BatchWriteExec::releaseStats() {
- return _stats.release();
-}
-
void BatchWriteExecStats::noteWriteAt(const ConnectionString& host,
repl::OpTime opTime,
const OID& electionId) {
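
noteWriteAt, unchanged in behavior, now writes into the caller-owned stats: it records the last opTime observed per shard host, which GLE later reads back via getWriteOpTimes(). A simplified sketch of that bookkeeping (stand-in OpTime type and host key):

    #include <iostream>
    #include <map>
    #include <string>

    struct OpTime { long long ts = 0; };  // stand-in for repl::OpTime

    struct BatchWriteStats {
        std::map<std::string, OpTime> writeOpTimes;
        // Remember the last opTime observed for each shard host.
        void noteWriteAt(const std::string& host, OpTime opTime) {
            writeOpTimes[host] = opTime;
        }
    };

    int main() {
        BatchWriteStats stats;
        stats.noteWriteAt("shardA:27017", OpTime{42});
        stats.noteWriteAt("shardA:27017", OpTime{43});  // overwrites earlier entry
        std::cout << stats.writeOpTimes["shardA:27017"].ts << "\n";  // prints 43
    }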
diff --git a/src/mongo/s/write_ops/batch_write_exec.h b/src/mongo/s/write_ops/batch_write_exec.h
index bb900b2825e..e29a80a9a72 100644
--- a/src/mongo/s/write_ops/batch_write_exec.h
+++ b/src/mongo/s/write_ops/batch_write_exec.h
@@ -75,11 +75,8 @@ public:
*/
void executeBatch(OperationContext* txn,
const BatchedCommandRequest& clientRequest,
- BatchedCommandResponse* clientResponse);
-
- const BatchWriteExecStats& getStats();
-
- BatchWriteExecStats* releaseStats();
+ BatchedCommandResponse* clientResponse,
+ BatchWriteExecStats* stats);
private:
// Not owned here
@@ -90,9 +87,6 @@ private:
// Not owned here
MultiCommandDispatch* _dispatcher;
-
- // Stats
- std::unique_ptr<BatchWriteExecStats> _stats;
};
struct HostOpTime {
diff --git a/src/mongo/s/write_ops/batch_write_exec_test.cpp b/src/mongo/s/write_ops/batch_write_exec_test.cpp
index 19c851d1ee0..100fb1806a1 100644
--- a/src/mongo/s/write_ops/batch_write_exec_test.cpp
+++ b/src/mongo/s/write_ops/batch_write_exec_test.cpp
@@ -102,10 +102,10 @@ TEST(BatchWriteExecTests, SingleOp) {
request.getInsertRequest()->addToDocuments(BSON("x" << 1));
BatchedCommandResponse response;
- backend.exec->executeBatch(&txn, request, &response);
+ BatchWriteExecStats stats;
+ backend.exec->executeBatch(&txn, request, &response, &stats);
ASSERT(response.getOk());
- const BatchWriteExecStats& stats = backend.exec->getStats();
ASSERT_EQUALS(stats.numRounds, 1);
}
@@ -136,7 +136,8 @@ TEST(BatchWriteExecTests, SingleOpError) {
request.getInsertRequest()->addToDocuments(BSON("x" << 1));
BatchedCommandResponse response;
- backend.exec->executeBatch(&txn, request, &response);
+ BatchWriteExecStats stats;
+ backend.exec->executeBatch(&txn, request, &response, &stats);
ASSERT(response.getOk());
ASSERT_EQUALS(response.getN(), 0);
ASSERT(response.isErrDetailsSet());
@@ -144,7 +145,6 @@ TEST(BatchWriteExecTests, SingleOpError) {
ASSERT(response.getErrDetailsAt(0)->getErrMessage().find(errResponse.getErrMessage()) !=
string::npos);
- const BatchWriteExecStats& stats = backend.exec->getStats();
ASSERT_EQUALS(stats.numRounds, 1);
}
@@ -180,10 +180,10 @@ TEST(BatchWriteExecTests, StaleOp) {
// Execute request
BatchedCommandResponse response;
- backend.exec->executeBatch(&txn, request, &response);
+ BatchWriteExecStats stats;
+ backend.exec->executeBatch(&txn, request, &response, &stats);
ASSERT(response.getOk());
- const BatchWriteExecStats& stats = backend.exec->getStats();
ASSERT_EQUALS(stats.numStaleBatches, 1);
}
@@ -217,10 +217,10 @@ TEST(BatchWriteExecTests, MultiStaleOp) {
// Execute request
BatchedCommandResponse response;
- backend.exec->executeBatch(&txn, request, &response);
+ BatchWriteExecStats stats;
+ backend.exec->executeBatch(&txn, request, &response, &stats);
ASSERT(response.getOk());
- const BatchWriteExecStats& stats = backend.exec->getStats();
ASSERT_EQUALS(stats.numStaleBatches, 3);
}
@@ -258,7 +258,8 @@ TEST(BatchWriteExecTests, TooManyStaleOp) {
// Execute request
BatchedCommandResponse response;
- backend.exec->executeBatch(&txn, request, &response);
+ BatchWriteExecStats stats;
+ backend.exec->executeBatch(&txn, request, &response, &stats);
ASSERT(response.getOk());
ASSERT_EQUALS(response.getN(), 0);
ASSERT(response.isErrDetailsSet());
@@ -301,10 +302,10 @@ TEST(BatchWriteExecTests, ManyStaleOpWithMigration) {
// Execute request
BatchedCommandResponse response;
- backend.exec->executeBatch(&txn, request, &response);
+ BatchWriteExecStats stats;
+ backend.exec->executeBatch(&txn, request, &response, &stats);
ASSERT(response.getOk());
- const BatchWriteExecStats& stats = backend.exec->getStats();
ASSERT_EQUALS(stats.numStaleBatches, 10);
}