author     Randolph Tan <randolph@10gen.com>    2016-01-13 15:18:36 -0500
committer  Randolph Tan <randolph@10gen.com>    2016-01-15 13:26:05 -0500
commit     56eae69cf882d92e502340c9ce5dcc4a7058db9a (patch)
tree       b85987a8d06e61be33c0c635e7d34a66d50c150e
parent     e5e6413da1ea1e6e86cdaa0e0a95ceb86e41274f (diff)
SERVER-22114 Remove unnecessary refresh in ChunkManagerTargeter::refreshNow
In addition, limit the lifetime of ChunkManager instances so that references
to them can be freed earlier.

(cherry picked from commit 65f2da2c49c2b22d2b80e6562b9b61f242cb9a18)

Conflicts:
	src/mongo/s/chunk_manager_targeter.cpp
	src/mongo/s/chunk_manager_targeter.h
	src/mongo/s/cluster_write.cpp
	src/mongo/s/cluster_write.h
	src/mongo/s/commands/cluster_write_cmd.cpp
	src/mongo/s/write_ops/batch_write_exec.cpp
	src/mongo/s/write_ops/batch_write_exec.h
	src/mongo/s/write_ops/batch_write_exec_test.cpp
-rw-r--r--  src/mongo/s/chunk_manager_targeter.cpp          |   8
-rw-r--r--  src/mongo/s/chunk_manager_targeter.h            |  11
-rw-r--r--  src/mongo/s/cluster_write.cpp                   |  49
-rw-r--r--  src/mongo/s/cluster_write.h                     |  20
-rw-r--r--  src/mongo/s/commands/cluster_write_cmd.cpp      |   4
-rw-r--r--  src/mongo/s/strategy.cpp                        |   4
-rw-r--r--  src/mongo/s/write_ops/batch_write_exec.cpp      |  33
-rw-r--r--  src/mongo/s/write_ops/batch_write_exec.h        |  10
-rw-r--r--  src/mongo/s/write_ops/batch_write_exec_test.cpp |  23
9 files changed, 56 insertions(+), 106 deletions(-)
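
Annotation: the change is an ownership inversion. The stats objects (TargeterStats,
BatchWriteExecStats) move from heap allocations owned by the targeter and executor to
caller-owned values passed in by pointer. That lets the caller scope the
ChunkManagerTargeter to a block and destroy it early (dropping its ChunkManager
references before splitIfNeeded runs) while the stats survive for post-write work.
A minimal standalone sketch of the pattern; Stats and Targeter are simplified
stand-ins, not the actual MongoDB types:

    #include <iostream>

    // Stand-in for TargeterStats: a plain value type, no heap allocation.
    struct Stats {
        int numTargetErrors = 0;
    };

    // Stand-in for ChunkManagerTargeter: borrows the stats it records into.
    class Targeter {
    public:
        explicit Targeter(Stats* stats) : _stats(stats) {}  // borrowed, not owned
        void noteTargetError() { ++_stats->numTargetErrors; }
    private:
        Stats* _stats;  // caller guarantees this outlives the targeter
    };

    int main() {
        Stats stats;  // caller-owned; outlives the targeter
        {
            Targeter targeter(&stats);
            targeter.noteTargetError();
        }  // targeter destroyed here; in the real code this releases ChunkManager refs
        // stats is still valid for follow-up work (e.g. deciding whether to split)
        std::cout << "target errors: " << stats.numTargetErrors << std::endl;
        return 0;
    }
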
diff --git a/src/mongo/s/chunk_manager_targeter.cpp b/src/mongo/s/chunk_manager_targeter.cpp
index fce380ef8e9..ad136985dd0 100644
--- a/src/mongo/s/chunk_manager_targeter.cpp
+++ b/src/mongo/s/chunk_manager_targeter.cpp
@@ -60,8 +60,8 @@ static bool getDBConfigSafe(const StringData& db, DBConfigPtr& config, string* e
return config.get();
}
-ChunkManagerTargeter::ChunkManagerTargeter()
- : _needsTargetingRefresh(false), _stats(new TargeterStats) {}
+ChunkManagerTargeter::ChunkManagerTargeter(TargeterStats* stats)
+ : _needsTargetingRefresh(false), _stats(stats) {}
Status ChunkManagerTargeter::init(const NamespaceString& nss) {
_nss = nss;
@@ -602,10 +602,6 @@ void ChunkManagerTargeter::noteCouldNotTarget() {
_needsTargetingRefresh = true;
}
-const TargeterStats* ChunkManagerTargeter::getStats() const {
- return _stats.get();
-}
-
Status ChunkManagerTargeter::refreshIfNeeded(bool* wasChanged) {
bool dummy;
if (!wasChanged)
diff --git a/src/mongo/s/chunk_manager_targeter.h b/src/mongo/s/chunk_manager_targeter.h
index c85e9f4128d..aad04e881e9 100644
--- a/src/mongo/s/chunk_manager_targeter.h
+++ b/src/mongo/s/chunk_manager_targeter.h
@@ -50,7 +50,7 @@ struct TargeterStats;
*/
class ChunkManagerTargeter : public NSTargeter {
public:
- ChunkManagerTargeter();
+ ChunkManagerTargeter(TargeterStats* stats);
/**
* Initializes the ChunkManagerTargeter with the latest targeting information for the
@@ -92,11 +92,6 @@ public:
*/
Status refreshIfNeeded(bool* wasChanged);
- /**
- * Returns the stats. Note that the returned stats object is still owned by this targeter.
- */
- const TargeterStats* getStats() const;
-
private:
// Different ways we can refresh metadata
// TODO: Improve these ways.
@@ -152,8 +147,8 @@ private:
// Stores whether we need to check the remote server on refresh
bool _needsTargetingRefresh;
- // Represents only the view and not really part of the targeter state.
- mutable boost::scoped_ptr<TargeterStats> _stats;
+ // Represents only the view and not really part of the targeter state. This is not owned here.
+ TargeterStats* _stats;
};
struct TargeterStats {
diff --git a/src/mongo/s/cluster_write.cpp b/src/mongo/s/cluster_write.cpp
index f13fae8b5f1..f1911331d0f 100644
--- a/src/mongo/s/cluster_write.cpp
+++ b/src/mongo/s/cluster_write.cpp
@@ -389,34 +389,35 @@ void ClusterWriter::write(const BatchedCommandRequest& origRequest,
}
ClusterWriter::ClusterWriter(bool autoSplit, int timeoutMillis)
- : _autoSplit(autoSplit), _timeoutMillis(timeoutMillis), _stats(new ClusterWriterStats) {}
+ : _autoSplit(autoSplit), _timeoutMillis(timeoutMillis) {}
-const ClusterWriterStats& ClusterWriter::getStats() {
- return *_stats;
+const BatchWriteExecStats& ClusterWriter::getStats() {
+ return _stats;
}
void ClusterWriter::shardWrite(const BatchedCommandRequest& request,
BatchedCommandResponse* response) {
- ChunkManagerTargeter targeter;
- Status targetInitStatus = targeter.init(request.getTargetingNSS());
+ TargeterStats targeterStats;
+ {
+ ChunkManagerTargeter targeter(&targeterStats);
+ Status targetInitStatus = targeter.init(request.getTargetingNSS());
- if (!targetInitStatus.isOK()) {
- warning() << "could not initialize targeter for"
- << (request.isInsertIndexRequest() ? " index" : "") << " write op in collection "
- << request.getTargetingNS() << endl;
+ if (!targetInitStatus.isOK()) {
+ warning() << "could not initialize targeter for"
+ << (request.isInsertIndexRequest() ? " index" : "")
+ << " write op in collection " << request.getTargetingNS() << endl;
- // Errors will be reported in response if we are unable to target
- }
+ // Errors will be reported in response if we are unable to target
+ }
- DBClientShardResolver resolver;
- DBClientMultiCommand dispatcher;
- BatchWriteExec exec(&targeter, &resolver, &dispatcher);
- exec.executeBatch(request, response);
+ DBClientShardResolver resolver;
+ DBClientMultiCommand dispatcher;
+ BatchWriteExec exec(&targeter, &resolver, &dispatcher);
+ exec.executeBatch(request, response, &_stats);
+ }
if (_autoSplit)
- splitIfNeeded(request.getNS(), *targeter.getStats());
-
- _stats->setShardStats(exec.releaseStats());
+ splitIfNeeded(request.getNS(), targeterStats);
}
void ClusterWriter::configWrite(const BatchedCommandRequest& request,
@@ -441,16 +442,4 @@ void ClusterWriter::configWrite(const BatchedCommandRequest& request,
exec.executeBatch(request, response, fsyncCheck);
}
-void ClusterWriterStats::setShardStats(BatchWriteExecStats* shardStats) {
- _shardStats.reset(shardStats);
-}
-
-bool ClusterWriterStats::hasShardStats() const {
- return NULL != _shardStats.get();
-}
-
-const BatchWriteExecStats& ClusterWriterStats::getShardStats() const {
- return *_shardStats;
-}
-
} // namespace mongo
diff --git a/src/mongo/s/cluster_write.h b/src/mongo/s/cluster_write.h
index 79b1b1ade29..89ed0dd3350 100644
--- a/src/mongo/s/cluster_write.h
+++ b/src/mongo/s/cluster_write.h
@@ -36,7 +36,6 @@
namespace mongo {
-class ClusterWriterStats;
class BatchWriteExecStats;
class ClusterWriter {
@@ -45,7 +44,7 @@ public:
void write(const BatchedCommandRequest& request, BatchedCommandResponse* response);
- const ClusterWriterStats& getStats();
+ const BatchWriteExecStats& getStats();
private:
void configWrite(const BatchedCommandRequest& request,
@@ -57,22 +56,7 @@ private:
bool _autoSplit;
int _timeoutMillis;
- boost::scoped_ptr<ClusterWriterStats> _stats;
-};
-
-class ClusterWriterStats {
-public:
- // Transfers ownership to the cluster write stats
- void setShardStats(BatchWriteExecStats* _shardStats);
-
- bool hasShardStats() const;
-
- const BatchWriteExecStats& getShardStats() const;
-
- // TODO: When we have ConfigCoordinator stats, put these here too.
-
-private:
- boost::scoped_ptr<BatchWriteExecStats> _shardStats;
+ BatchWriteExecStats _stats;
};
/**
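
Annotation: the deleted ClusterWriterStats wrapper existed only to hold a
heap-allocated BatchWriteExecStats and answer hasShardStats(); once the executor
writes into a caller-supplied object, ClusterWriter can hold the stats by value.
A sketch of that simplification, with stand-in names rather than the actual classes:

    #include <boost/scoped_ptr.hpp>
    #include <cstddef>

    struct ShardStats {
        int numRounds;
        ShardStats() : numRounds(0) {}
    };

    // Before: heap-held stats behind a wrapper; callers must check hasStats().
    class WriterBefore {
    public:
        bool hasStats() const { return _stats.get() != NULL; }
    private:
        boost::scoped_ptr<ShardStats> _stats;  // empty until a shard write runs
    };

    // After: stats held by value: always present, no null check, no allocation.
    class WriterAfter {
    public:
        const ShardStats& getStats() const { return _stats; }
    private:
        ShardStats _stats;
    };
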
diff --git a/src/mongo/s/commands/cluster_write_cmd.cpp b/src/mongo/s/commands/cluster_write_cmd.cpp
index 96d219225e9..5b17efebe27 100644
--- a/src/mongo/s/commands/cluster_write_cmd.cpp
+++ b/src/mongo/s/commands/cluster_write_cmd.cpp
@@ -218,9 +218,9 @@ bool ClusterWriteCmd::run(OperationContext* txn,
}
// Save the last opTimes written on each shard for this client, to allow GLE to work
- if (ClientInfo::exists() && writer.getStats().hasShardStats()) {
+ if (ClientInfo::exists()) {
ClientInfo* clientInfo = ClientInfo::get(NULL);
- clientInfo->addHostOpTimes(writer.getStats().getShardStats().getWriteOpTimes());
+ clientInfo->addHostOpTimes(writer.getStats().getWriteOpTimes());
}
// TODO
diff --git a/src/mongo/s/strategy.cpp b/src/mongo/s/strategy.cpp
index 850b620165c..f3780722b6f 100644
--- a/src/mongo/s/strategy.cpp
+++ b/src/mongo/s/strategy.cpp
@@ -452,8 +452,8 @@ Status Strategy::commandOpWrite(const std::string& dbName,
std::vector<CommandResult>* results) {
// Note that this implementation will not handle targeting retries and does not completely
// emulate write behavior
-
- ChunkManagerTargeter targeter;
+ TargeterStats targeterStats;
+ ChunkManagerTargeter targeter(&targeterStats);
Status status =
targeter.init(NamespaceString(targetingBatchItem.getRequest()->getTargetingNS()));
if (!status.isOK())
diff --git a/src/mongo/s/write_ops/batch_write_exec.cpp b/src/mongo/s/write_ops/batch_write_exec.cpp
index 66a81a93023..e5a1656c65b 100644
--- a/src/mongo/s/write_ops/batch_write_exec.cpp
+++ b/src/mongo/s/write_ops/batch_write_exec.cpp
@@ -51,10 +51,7 @@ using std::vector;
BatchWriteExec::BatchWriteExec(NSTargeter* targeter,
ShardResolver* resolver,
MultiCommandDispatch* dispatcher)
- : _targeter(targeter),
- _resolver(resolver),
- _dispatcher(dispatcher),
- _stats(new BatchWriteExecStats) {}
+ : _targeter(targeter), _resolver(resolver), _dispatcher(dispatcher) {}
namespace {
@@ -92,8 +89,10 @@ static bool isShardMetadataChanging(const vector<ShardError*>& staleErrors) {
// This only applies when no writes are occurring and metadata is not changing on reload
static const int kMaxRoundsWithoutProgress(5);
+
void BatchWriteExec::executeBatch(const BatchedCommandRequest& clientRequest,
- BatchedCommandResponse* clientResponse) {
+ BatchedCommandResponse* clientResponse,
+ BatchWriteExecStats* stats) {
LOG(4) << "starting execution of write batch of size "
<< static_cast<int>(clientRequest.sizeWriteOps()) << " for " << clientRequest.getNS()
<< endl;
@@ -143,7 +142,7 @@ void BatchWriteExec::executeBatch(const BatchedCommandRequest& clientRequest,
// Don't do anything until a targeter refresh
_targeter->noteCouldNotTarget();
refreshedTargeter = true;
- ++_stats->numTargetErrors;
+ ++stats->numTargetErrors;
dassert(childBatches.size() == 0u);
}
@@ -181,7 +180,7 @@ void BatchWriteExec::executeBatch(const BatchedCommandRequest& clientRequest,
Status resolveStatus =
_resolver->chooseWriteHost(nextBatch->getEndpoint().shardName, &shardHost);
if (!resolveStatus.isOK()) {
- ++_stats->numResolveErrors;
+ ++stats->numResolveErrors;
// Record a resolve failure
// TODO: It may be necessary to refresh the cache if stale, or maybe just
@@ -268,7 +267,7 @@ void BatchWriteExec::executeBatch(const BatchedCommandRequest& clientRequest,
if (staleErrors.size() > 0) {
noteStaleResponses(staleErrors, _targeter);
- ++_stats->numStaleBatches;
+ ++stats->numStaleBatches;
}
// Remember if the shard is actively changing metadata right now
@@ -279,10 +278,10 @@ void BatchWriteExec::executeBatch(const BatchedCommandRequest& clientRequest,
// Remember that we successfully wrote to this shard
// NOTE: This will record lastOps for shards where we actually didn't update
// or delete any documents, which preserves old behavior but is conservative
- _stats->noteWriteAt(shardHost,
- response.isLastOpSet() ? response.getLastOp() : OpTime(),
- response.isElectionIdSet() ? response.getElectionId()
- : OID());
+ stats->noteWriteAt(shardHost,
+ response.isLastOpSet() ? response.getLastOp() : OpTime(),
+ response.isElectionIdSet() ? response.getElectionId()
+ : OID());
} else {
// Error occurred dispatching, note it
@@ -302,7 +301,7 @@ void BatchWriteExec::executeBatch(const BatchedCommandRequest& clientRequest,
}
++rounds;
- ++_stats->numRounds;
+ ++stats->numRounds;
// If we're done, get out
if (batchOp.isFinished())
@@ -359,14 +358,6 @@ void BatchWriteExec::executeBatch(const BatchedCommandRequest& clientRequest,
<< " for " << clientRequest.getNS() << endl;
}
-const BatchWriteExecStats& BatchWriteExec::getStats() {
- return *_stats;
-}
-
-BatchWriteExecStats* BatchWriteExec::releaseStats() {
- return _stats.release();
-}
-
void BatchWriteExecStats::noteWriteAt(const ConnectionString& host,
OpTime opTime,
const OID& electionId) {
diff --git a/src/mongo/s/write_ops/batch_write_exec.h b/src/mongo/s/write_ops/batch_write_exec.h
index 14837845c42..2080e1240fb 100644
--- a/src/mongo/s/write_ops/batch_write_exec.h
+++ b/src/mongo/s/write_ops/batch_write_exec.h
@@ -73,11 +73,8 @@ public:
* This function does not throw, any errors are reported via the clientResponse.
*/
void executeBatch(const BatchedCommandRequest& clientRequest,
- BatchedCommandResponse* clientResponse);
-
- const BatchWriteExecStats& getStats();
-
- BatchWriteExecStats* releaseStats();
+ BatchedCommandResponse* clientResponse,
+ BatchWriteExecStats* stats);
private:
// Not owned here
@@ -88,9 +85,6 @@ private:
// Not owned here
MultiCommandDispatch* _dispatcher;
-
- // Stats
- std::auto_ptr<BatchWriteExecStats> _stats;
};
struct HostOpTime {
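
Annotation: the removed member was a std::auto_ptr (deprecated in C++11), and the
getStats()/releaseStats() pair was easy to misuse: releaseStats() returns NULL if
called twice, and getStats() after a release dereferences an empty pointer. The
out-parameter makes ownership explicit at the call site and leaves BatchWriteExec
stateless between batches. A sketch of the call-site shape; Stats,
executeBatchSketch, and caller are hypothetical names:

    struct Stats {
        int numRounds;
        Stats() : numRounds(0) {}
    };

    // The executor no longer stores stats; it records into what it is handed.
    void executeBatchSketch(Stats* stats) {
        ++stats->numRounds;  // one increment per retry round in the real code
    }

    void caller() {
        Stats stats;               // owned here, on the stack
        executeBatchSketch(&stats);
        // stats.numRounds is usable immediately: no getStats()/releaseStats()
        // handshake, and no question of who frees the object.
    }
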
diff --git a/src/mongo/s/write_ops/batch_write_exec_test.cpp b/src/mongo/s/write_ops/batch_write_exec_test.cpp
index 437f2caa024..9bfd6508561 100644
--- a/src/mongo/s/write_ops/batch_write_exec_test.cpp
+++ b/src/mongo/s/write_ops/batch_write_exec_test.cpp
@@ -101,10 +101,10 @@ TEST(BatchWriteExecTests, SingleOp) {
request.getInsertRequest()->addToDocuments(BSON("x" << 1));
BatchedCommandResponse response;
- backend.exec->executeBatch(request, &response);
+ BatchWriteExecStats stats;
+ backend.exec->executeBatch(request, &response, &stats);
ASSERT(response.getOk());
- const BatchWriteExecStats& stats = backend.exec->getStats();
ASSERT_EQUALS(stats.numRounds, 1);
}
@@ -134,7 +134,8 @@ TEST(BatchWriteExecTests, SingleOpError) {
request.getInsertRequest()->addToDocuments(BSON("x" << 1));
BatchedCommandResponse response;
- backend.exec->executeBatch(request, &response);
+ BatchWriteExecStats stats;
+ backend.exec->executeBatch(request, &response, &stats);
ASSERT(response.getOk());
ASSERT_EQUALS(response.getN(), 0);
ASSERT(response.isErrDetailsSet());
@@ -142,7 +143,6 @@ TEST(BatchWriteExecTests, SingleOpError) {
ASSERT(response.getErrDetailsAt(0)->getErrMessage().find(errResponse.getErrMessage()) !=
string::npos);
- const BatchWriteExecStats& stats = backend.exec->getStats();
ASSERT_EQUALS(stats.numRounds, 1);
}
@@ -177,10 +177,10 @@ TEST(BatchWriteExecTests, StaleOp) {
// Execute request
BatchedCommandResponse response;
- backend.exec->executeBatch(request, &response);
+ BatchWriteExecStats stats;
+ backend.exec->executeBatch(request, &response, &stats);
ASSERT(response.getOk());
- const BatchWriteExecStats& stats = backend.exec->getStats();
ASSERT_EQUALS(stats.numStaleBatches, 1);
}
@@ -213,10 +213,10 @@ TEST(BatchWriteExecTests, MultiStaleOp) {
// Execute request
BatchedCommandResponse response;
- backend.exec->executeBatch(request, &response);
+ BatchWriteExecStats stats;
+ backend.exec->executeBatch(request, &response, &stats);
ASSERT(response.getOk());
- const BatchWriteExecStats& stats = backend.exec->getStats();
ASSERT_EQUALS(stats.numStaleBatches, 3);
}
@@ -253,7 +253,8 @@ TEST(BatchWriteExecTests, TooManyStaleOp) {
// Execute request
BatchedCommandResponse response;
- backend.exec->executeBatch(request, &response);
+ BatchWriteExecStats stats;
+ backend.exec->executeBatch(request, &response, &stats);
ASSERT(response.getOk());
ASSERT_EQUALS(response.getN(), 0);
ASSERT(response.isErrDetailsSet());
@@ -295,10 +296,10 @@ TEST(BatchWriteExecTests, ManyStaleOpWithMigration) {
// Execute request
BatchedCommandResponse response;
- backend.exec->executeBatch(request, &response);
+ BatchWriteExecStats stats;
+ backend.exec->executeBatch(request, &response, &stats);
ASSERT(response.getOk());
- const BatchWriteExecStats& stats = backend.exec->getStats();
ASSERT_EQUALS(stats.numStaleBatches, 10);
}
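
Annotation on the test changes: executeBatch now accumulates into whatever stats
object it is handed, which is why each test declares a fresh BatchWriteExecStats
per batch. A hypothetical reuse, in the fixture's idiom (requestA/B and
responseA/B are made-up names, not code from the tests), shows why:

    BatchWriteExecStats stats;
    backend.exec->executeBatch(requestA, &responseA, &stats);
    backend.exec->executeBatch(requestB, &responseB, &stats);
    // stats.numRounds now spans both batches; the tests instead declare a
    // fresh BatchWriteExecStats per executeBatch call to keep counters per-batch.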