summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--src/mongo/db/catalog/index_catalog.cpp6
-rw-r--r--src/mongo/db/catalog/index_catalog_entry.cpp24
-rw-r--r--src/mongo/db/catalog/index_catalog_entry.h16
-rw-r--r--src/mongo/db/commands/index_filter_commands_test.cpp5
-rw-r--r--src/mongo/db/commands/plan_cache_commands.cpp2
-rw-r--r--src/mongo/db/commands/plan_cache_commands_test.cpp5
-rw-r--r--src/mongo/db/exec/cached_plan.cpp9
-rw-r--r--src/mongo/db/exec/geo_near.cpp15
-rw-r--r--src/mongo/db/exec/near.cpp5
-rw-r--r--src/mongo/db/exec/near.h4
-rw-r--r--src/mongo/db/exec/stagedebug_cmd.cpp8
-rw-r--r--src/mongo/db/exec/subplan.cpp31
-rw-r--r--src/mongo/db/exec/subplan.h6
-rw-r--r--src/mongo/db/exec/update.cpp32
-rw-r--r--src/mongo/db/geo/big_polygon.cpp15
-rw-r--r--src/mongo/db/geo/geometry_container.cpp6
-rw-r--r--src/mongo/db/geo/geoparser.cpp20
-rw-r--r--src/mongo/db/matcher/expression_tree_test.cpp18
-rw-r--r--src/mongo/db/ops/path_support_test.cpp9
-rw-r--r--src/mongo/db/query/plan_cache.cpp18
-rw-r--r--src/mongo/db/query/plan_cache_test.cpp5
-rw-r--r--src/mongo/db/query/plan_ranker.cpp8
-rw-r--r--src/mongo/db/query/plan_ranker.h6
-rw-r--r--src/mongo/db/query/planner_access.cpp19
-rw-r--r--src/mongo/db/query/query_planner_test.cpp8
-rw-r--r--src/mongo/db/query/query_planner_test_fixture.cpp17
-rw-r--r--src/mongo/db/query/query_planner_test_fixture.h2
-rw-r--r--src/mongo/db/range_deleter.cpp10
-rw-r--r--src/mongo/db/range_deleter.h2
-rw-r--r--src/mongo/db/s/collection_metadata.cpp4
-rw-r--r--src/mongo/db/s/collection_metadata.h6
-rw-r--r--src/mongo/db/stats/range_deleter_server_status.cpp7
-rw-r--r--src/mongo/db/storage/key_string_test.cpp6
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp4
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h5
-rw-r--r--src/mongo/dbtests/query_stage_near.cpp11
-rw-r--r--src/mongo/s/commands/chunk_manager_targeter.cpp56
-rw-r--r--src/mongo/s/commands/chunk_manager_targeter.h13
-rw-r--r--src/mongo/s/commands/cluster_write_cmd.cpp10
-rw-r--r--src/mongo/s/commands/strategy.cpp5
-rw-r--r--src/mongo/s/ns_targeter.h12
-rw-r--r--src/mongo/s/shard_key_pattern.cpp28
-rw-r--r--src/mongo/s/shard_key_pattern.h5
-rw-r--r--src/mongo/s/write_ops/batch_upconvert.cpp28
-rw-r--r--src/mongo/s/write_ops/batch_upconvert.h4
-rw-r--r--src/mongo/s/write_ops/batch_upconvert_test.cpp15
-rw-r--r--src/mongo/s/write_ops/batch_write_op.cpp19
-rw-r--r--src/mongo/s/write_ops/batch_write_op.h5
-rw-r--r--src/mongo/s/write_ops/mock_ns_targeter.h22
-rw-r--r--src/mongo/s/write_ops/write_op.cpp11
-rw-r--r--src/mongo/util/net/listen.cpp11
-rw-r--r--src/mongo/util/transitional_tools_do_not_use/vector_spooling.h66
52 files changed, 421 insertions, 263 deletions
diff --git a/src/mongo/db/catalog/index_catalog.cpp b/src/mongo/db/catalog/index_catalog.cpp
index 32fb80b5587..470bbacf6ec 100644
--- a/src/mongo/db/catalog/index_catalog.cpp
+++ b/src/mongo/db/catalog/index_catalog.cpp
@@ -1083,7 +1083,7 @@ void IndexCatalog::IndexIterator::_advance() {
_next = NULL;
while (_iterator != _catalog->_entries.end()) {
- IndexCatalogEntry* entry = *_iterator;
+ IndexCatalogEntry* entry = _iterator->get();
++_iterator;
if (!_includeUnfinishedIndexes) {
@@ -1330,7 +1330,7 @@ Status IndexCatalog::indexRecords(OperationContext* opCtx,
for (IndexCatalogEntryContainer::const_iterator i = _entries.begin(); i != _entries.end();
++i) {
- Status s = _indexRecords(opCtx, *i, bsonRecords, keysInsertedOut);
+ Status s = _indexRecords(opCtx, i->get(), bsonRecords, keysInsertedOut);
if (!s.isOK())
return s;
}
@@ -1349,7 +1349,7 @@ void IndexCatalog::unindexRecord(OperationContext* opCtx,
for (IndexCatalogEntryContainer::const_iterator i = _entries.begin(); i != _entries.end();
++i) {
- IndexCatalogEntry* entry = *i;
+ IndexCatalogEntry* entry = i->get();
// If it's a background index, we DO NOT want to log anything.
bool logIfError = entry->isReady(opCtx) ? !noWarn : false;
diff --git a/src/mongo/db/catalog/index_catalog_entry.cpp b/src/mongo/db/catalog/index_catalog_entry.cpp
index 7400536bc8b..9c540e4a102 100644
--- a/src/mongo/db/catalog/index_catalog_entry.cpp
+++ b/src/mongo/db/catalog/index_catalog_entry.cpp
@@ -314,11 +314,11 @@ const IndexCatalogEntry* IndexCatalogEntryContainer::find(const IndexDescriptor*
return desc->_cachedEntry;
for (const_iterator i = begin(); i != end(); ++i) {
- const IndexCatalogEntry* e = *i;
+ const IndexCatalogEntry* e = i->get();
if (e->descriptor() == desc)
return e;
}
- return NULL;
+ return nullptr;
}
IndexCatalogEntry* IndexCatalogEntryContainer::find(const IndexDescriptor* desc) {
@@ -326,33 +326,31 @@ IndexCatalogEntry* IndexCatalogEntryContainer::find(const IndexDescriptor* desc)
return desc->_cachedEntry;
for (iterator i = begin(); i != end(); ++i) {
- IndexCatalogEntry* e = *i;
+ IndexCatalogEntry* e = i->get();
if (e->descriptor() == desc)
return e;
}
- return NULL;
+ return nullptr;
}
IndexCatalogEntry* IndexCatalogEntryContainer::find(const string& name) {
for (iterator i = begin(); i != end(); ++i) {
- IndexCatalogEntry* e = *i;
+ IndexCatalogEntry* e = i->get();
if (e->descriptor()->indexName() == name)
return e;
}
- return NULL;
+ return nullptr;
}
IndexCatalogEntry* IndexCatalogEntryContainer::release(const IndexDescriptor* desc) {
- for (std::vector<IndexCatalogEntry*>::iterator i = _entries.mutableVector().begin();
- i != _entries.mutableVector().end();
- ++i) {
- IndexCatalogEntry* e = *i;
- if (e->descriptor() != desc)
+ for (auto i = _entries.begin(); i != _entries.end(); ++i) {
+ if ((*i)->descriptor() != desc)
continue;
- _entries.mutableVector().erase(i);
+ IndexCatalogEntry* e = i->release();
+ _entries.erase(i);
return e;
}
- return NULL;
+ return nullptr;
}
} // namespace mongo
diff --git a/src/mongo/db/catalog/index_catalog_entry.h b/src/mongo/db/catalog/index_catalog_entry.h
index 065d1a544ac..6a36f455661 100644
--- a/src/mongo/db/catalog/index_catalog_entry.h
+++ b/src/mongo/db/catalog/index_catalog_entry.h
@@ -218,21 +218,21 @@ private:
class IndexCatalogEntryContainer {
public:
- typedef std::vector<IndexCatalogEntry*>::const_iterator const_iterator;
- typedef std::vector<IndexCatalogEntry*>::const_iterator iterator;
+ typedef std::vector<std::unique_ptr<IndexCatalogEntry>>::const_iterator const_iterator;
+ typedef std::vector<std::unique_ptr<IndexCatalogEntry>>::const_iterator iterator;
const_iterator begin() const {
- return _entries.vector().begin();
+ return _entries.begin();
}
const_iterator end() const {
- return _entries.vector().end();
+ return _entries.end();
}
iterator begin() {
- return _entries.vector().begin();
+ return _entries.begin();
}
iterator end() {
- return _entries.vector().end();
+ return _entries.end();
}
// TODO: these have to be SUPER SUPER FAST
@@ -261,10 +261,10 @@ public:
// pass ownership to EntryContainer
void add(IndexCatalogEntry* entry) {
- _entries.mutableVector().push_back(entry);
+ _entries.push_back(std::unique_ptr<IndexCatalogEntry>{entry});
}
private:
- OwnedPointerVector<IndexCatalogEntry> _entries;
+ std::vector<std::unique_ptr<IndexCatalogEntry>> _entries;
};
}
diff --git a/src/mongo/db/commands/index_filter_commands_test.cpp b/src/mongo/db/commands/index_filter_commands_test.cpp
index 218a93be606..e182aabe539 100644
--- a/src/mongo/db/commands/index_filter_commands_test.cpp
+++ b/src/mongo/db/commands/index_filter_commands_test.cpp
@@ -39,6 +39,7 @@
#include "mongo/db/query/plan_ranker.h"
#include "mongo/db/query/query_solution.h"
#include "mongo/db/query/query_test_service_context.h"
+#include "mongo/stdx/memory.h"
#include "mongo/unittest/unittest.h"
using namespace mongo;
@@ -107,9 +108,9 @@ PlanRankingDecision* createDecision(size_t numPlans) {
unique_ptr<PlanRankingDecision> why(new PlanRankingDecision());
for (size_t i = 0; i < numPlans; ++i) {
CommonStats common("COLLSCAN");
- unique_ptr<PlanStageStats> stats(new PlanStageStats(common, STAGE_COLLSCAN));
+ auto stats = stdx::make_unique<PlanStageStats>(common, STAGE_COLLSCAN);
stats->specific.reset(new CollectionScanStats());
- why->stats.mutableVector().push_back(stats.release());
+ why->stats.push_back(std::move(stats));
why->scores.push_back(0U);
why->candidateOrder.push_back(i);
}
diff --git a/src/mongo/db/commands/plan_cache_commands.cpp b/src/mongo/db/commands/plan_cache_commands.cpp
index 4cd4ca26992..ff9bd872cf3 100644
--- a/src/mongo/db/commands/plan_cache_commands.cpp
+++ b/src/mongo/db/commands/plan_cache_commands.cpp
@@ -416,7 +416,7 @@ Status PlanCacheListPlans::list(OperationContext* opCtx,
BSONObjBuilder reasonBob(planBob.subobjStart("reason"));
reasonBob.append("score", entry->decision->scores[i]);
BSONObjBuilder statsBob(reasonBob.subobjStart("stats"));
- PlanStageStats* stats = entry->decision->stats.vector()[i];
+ PlanStageStats* stats = entry->decision->stats[i].get();
if (stats) {
Explain::statsToBSON(*stats, &statsBob);
}
diff --git a/src/mongo/db/commands/plan_cache_commands_test.cpp b/src/mongo/db/commands/plan_cache_commands_test.cpp
index 1ec3611ccdf..87ce9dd8093 100644
--- a/src/mongo/db/commands/plan_cache_commands_test.cpp
+++ b/src/mongo/db/commands/plan_cache_commands_test.cpp
@@ -40,6 +40,7 @@
#include "mongo/db/query/plan_ranker.h"
#include "mongo/db/query/query_solution.h"
#include "mongo/db/query/query_test_service_context.h"
+#include "mongo/stdx/memory.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/mongoutils/str.h"
@@ -116,9 +117,9 @@ PlanRankingDecision* createDecision(size_t numPlans) {
unique_ptr<PlanRankingDecision> why(new PlanRankingDecision());
for (size_t i = 0; i < numPlans; ++i) {
CommonStats common("COLLSCAN");
- unique_ptr<PlanStageStats> stats(new PlanStageStats(common, STAGE_COLLSCAN));
+ auto stats = stdx::make_unique<PlanStageStats>(common, STAGE_COLLSCAN);
stats->specific.reset(new CollectionScanStats());
- why->stats.mutableVector().push_back(stats.release());
+ why->stats.push_back(std::move(stats));
why->scores.push_back(0U);
why->candidateOrder.push_back(i);
}
diff --git a/src/mongo/db/exec/cached_plan.cpp b/src/mongo/db/exec/cached_plan.cpp
index 9c10ccf7ea6..fd371a3c6c7 100644
--- a/src/mongo/db/exec/cached_plan.cpp
+++ b/src/mongo/db/exec/cached_plan.cpp
@@ -47,6 +47,7 @@
#include "mongo/stdx/memory.h"
#include "mongo/util/log.h"
#include "mongo/util/mongoutils/str.h"
+#include "mongo/util/transitional_tools_do_not_use/vector_spooling.h"
namespace mongo {
@@ -213,7 +214,8 @@ Status CachedPlanStage::replan(PlanYieldPolicy* yieldPolicy, bool shouldCache) {
<< status.reason());
}
- OwnedPointerVector<QuerySolution> solutions(rawSolutions);
+ std::vector<std::unique_ptr<QuerySolution>> solutions =
+ transitional_tools_do_not_use::spool_vector(rawSolutions);
// We cannot figure out how to answer the query. Perhaps it requires an index
// we do not have?
@@ -236,7 +238,8 @@ Status CachedPlanStage::replan(PlanYieldPolicy* yieldPolicy, bool shouldCache) {
verify(StageBuilder::build(
getOpCtx(), _collection, *_canonicalQuery, *solutions[0], _ws, &newRoot));
_children.emplace_back(newRoot);
- _replannedQs.reset(solutions.popAndReleaseBack());
+ _replannedQs = std::move(solutions.back());
+ solutions.pop_back();
LOG(1)
<< "Replanning of query resulted in single query solution, which will not be cached. "
@@ -264,7 +267,7 @@ Status CachedPlanStage::replan(PlanYieldPolicy* yieldPolicy, bool shouldCache) {
getOpCtx(), _collection, *_canonicalQuery, *solutions[ix], _ws, &nextPlanRoot));
// Takes ownership of 'solutions[ix]' and 'nextPlanRoot'.
- multiPlanStage->addPlan(solutions.releaseAt(ix), nextPlanRoot, _ws);
+ multiPlanStage->addPlan(solutions[ix].release(), nextPlanRoot, _ws);
}
// Delegate to the MultiPlanStage's plan selection facility.
diff --git a/src/mongo/db/exec/geo_near.cpp b/src/mongo/db/exec/geo_near.cpp
index 800a4ee76b2..b7a25313969 100644
--- a/src/mongo/db/exec/geo_near.cpp
+++ b/src/mongo/db/exec/geo_near.cpp
@@ -31,6 +31,9 @@
#include "mongo/db/exec/geo_near.h"
+#include <memory>
+#include <vector>
+
// For s2 search
#include "third_party/s2/s2regionintersection.h"
@@ -46,6 +49,7 @@
#include "mongo/db/matcher/expression.h"
#include "mongo/db/query/expression_index.h"
#include "mongo/db/query/expression_index_knobs.h"
+#include "mongo/stdx/memory.h"
#include "mongo/util/log.h"
#include <algorithm>
@@ -99,7 +103,7 @@ struct StoredGeometry {
*/
static void extractGeometries(const BSONObj& doc,
const string& path,
- vector<StoredGeometry*>* geometries) {
+ std::vector<std::unique_ptr<StoredGeometry>>* geometries) {
BSONElementSet geomElements;
// NOTE: Annoyingly, we cannot just expand arrays b/c single 2d points are arrays, we need
// to manually expand all results to check if they are geometries
@@ -111,7 +115,7 @@ static void extractGeometries(const BSONObj& doc,
if (stored.get()) {
// Valid geometry element
- geometries->push_back(stored.release());
+ geometries->push_back(std::move(stored));
} else if (el.type() == Array) {
// Many geometries may be in an array
BSONObjIterator arrIt(el.Obj());
@@ -121,7 +125,7 @@ static void extractGeometries(const BSONObj& doc,
if (stored.get()) {
// Valid geometry element
- geometries->push_back(stored.release());
+ geometries->push_back(std::move(stored));
} else {
warning() << "geoNear stage read non-geometry element " << redact(nextEl)
<< " in array " << redact(el);
@@ -147,14 +151,13 @@ static StatusWith<double> computeGeoNearDistance(const GeoNearParams& nearParams
CRS queryCRS = nearParams.nearQuery->centroid->crs;
// Extract all the geometries out of this document for the near query
- OwnedPointerVector<StoredGeometry> geometriesOwned;
- vector<StoredGeometry*>& geometries = geometriesOwned.mutableVector();
+ std::vector<std::unique_ptr<StoredGeometry>> geometries;
extractGeometries(member->obj.value(), nearParams.nearQuery->field, &geometries);
// Compute the minimum distance of all the geometries in the document
double minDistance = -1;
BSONObj minDistanceObj;
- for (vector<StoredGeometry*>::iterator it = geometries.begin(); it != geometries.end(); ++it) {
+ for (auto it = geometries.begin(); it != geometries.end(); ++it) {
StoredGeometry& stored = **it;
// NOTE: A stored document with STRICT_SPHERE CRS is treated as a malformed document
diff --git a/src/mongo/db/exec/near.cpp b/src/mongo/db/exec/near.cpp
index 31825995c0e..1c7b4135631 100644
--- a/src/mongo/db/exec/near.cpp
+++ b/src/mongo/db/exec/near.cpp
@@ -154,8 +154,9 @@ PlanStage::StageState NearStage::bufferNext(WorkingSetID* toReturn, Status* erro
}
// CoveredInterval and its child stage are owned by _childrenIntervals
- _childrenIntervals.push_back(intervalStatus.getValue());
- _nextInterval = _childrenIntervals.back();
+ _childrenIntervals.push_back(
+ std::unique_ptr<NearStage::CoveredInterval>{intervalStatus.getValue()});
+ _nextInterval = _childrenIntervals.back().get();
_specificStats.intervalStats.emplace_back();
_nextIntervalStats = &_specificStats.intervalStats.back();
_nextIntervalStats->minDistanceAllowed = _nextInterval->minDistance;
diff --git a/src/mongo/db/exec/near.h b/src/mongo/db/exec/near.h
index 8ba21895baf..fae4cb71b6c 100644
--- a/src/mongo/db/exec/near.h
+++ b/src/mongo/db/exec/near.h
@@ -28,7 +28,9 @@
#pragma once
+#include <memory>
#include <queue>
+#include <vector>
#include "mongo/base/status_with.h"
#include "mongo/base/string_data.h"
@@ -203,7 +205,7 @@ private:
//
// All children intervals except the last active one are only used by getStats(),
// because they are all EOF.
- OwnedPointerVector<CoveredInterval> _childrenIntervals;
+ std::vector<std::unique_ptr<CoveredInterval>> _childrenIntervals;
};
/**
diff --git a/src/mongo/db/exec/stagedebug_cmd.cpp b/src/mongo/db/exec/stagedebug_cmd.cpp
index 72949a80598..8fd48430e7a 100644
--- a/src/mongo/db/exec/stagedebug_cmd.cpp
+++ b/src/mongo/db/exec/stagedebug_cmd.cpp
@@ -181,7 +181,7 @@ public:
BSONObj planObj = planElt.Obj();
// Parse the plan into these.
- OwnedPointerVector<MatchExpression> exprs;
+ std::vector<std::unique_ptr<MatchExpression>> exprs;
unique_ptr<WorkingSet> ws(new WorkingSet());
PlanStage* userRoot = parseQuery(opCtx, collection, planObj, ws.get(), &exprs);
@@ -227,7 +227,7 @@ public:
Collection* collection,
BSONObj obj,
WorkingSet* workingSet,
- OwnedPointerVector<MatchExpression>* exprs) {
+ std::vector<std::unique_ptr<MatchExpression>>* exprs) {
BSONElement firstElt = obj.firstElement();
if (!firstElt.isABSONObj()) {
return NULL;
@@ -257,9 +257,9 @@ public:
}
std::unique_ptr<MatchExpression> me = std::move(statusWithMatcher.getValue());
// exprs is what will wind up deleting this.
- matcher = me.release();
+ matcher = me.get();
verify(NULL != matcher);
- exprs->mutableVector().push_back(matcher);
+ exprs->push_back(std::move(me));
} else if (argsTag == e.fieldName()) {
nodeArgs = argObj;
} else {
diff --git a/src/mongo/db/exec/subplan.cpp b/src/mongo/db/exec/subplan.cpp
index 4fa4f8d7ad0..d23f856c780 100644
--- a/src/mongo/db/exec/subplan.cpp
+++ b/src/mongo/db/exec/subplan.cpp
@@ -32,6 +32,9 @@
#include "mongo/db/exec/subplan.h"
+#include <memory>
+#include <vector>
+
#include "mongo/client/dbclientinterface.h"
#include "mongo/db/exec/multi_plan.h"
#include "mongo/db/exec/scoped_timer.h"
@@ -46,6 +49,7 @@
#include "mongo/stdx/memory.h"
#include "mongo/util/log.h"
#include "mongo/util/scopeguard.h"
+#include "mongo/util/transitional_tools_do_not_use/vector_spooling.h"
namespace mongo {
@@ -180,8 +184,8 @@ Status SubplanStage::planSubqueries() {
for (size_t i = 0; i < _orExpression->numChildren(); ++i) {
// We need a place to shove the results from planning this branch.
- _branchResults.push_back(new BranchPlanningResult());
- BranchPlanningResult* branchResult = _branchResults.back();
+ _branchResults.push_back(stdx::make_unique<BranchPlanningResult>());
+ BranchPlanningResult* branchResult = _branchResults.back().get();
MatchExpression* orChild = _orExpression->getChild(i);
@@ -216,9 +220,11 @@ Status SubplanStage::planSubqueries() {
// We don't set NO_TABLE_SCAN because peeking at the cache data will keep us from
// considering any plan that's a collscan.
- Status status = QueryPlanner::plan(*branchResult->canonicalQuery,
- _plannerParams,
- &branchResult->solutions.mutableVector());
+ invariant(branchResult->solutions.empty());
+ std::vector<QuerySolution*> rawSolutions;
+ Status status =
+ QueryPlanner::plan(*branchResult->canonicalQuery, _plannerParams, &rawSolutions);
+ branchResult->solutions = transitional_tools_do_not_use::spool_vector(rawSolutions);
if (!status.isOK()) {
mongoutils::str::stream ss;
@@ -290,7 +296,7 @@ Status SubplanStage::choosePlanForSubqueries(PlanYieldPolicy* yieldPolicy) {
for (size_t i = 0; i < _orExpression->numChildren(); ++i) {
MatchExpression* orChild = _orExpression->getChild(i);
- BranchPlanningResult* branchResult = _branchResults[i];
+ BranchPlanningResult* branchResult = _branchResults[i].get();
if (branchResult->cachedSolution.get()) {
// We can get the index tags we need out of the cache.
@@ -300,7 +306,7 @@ Status SubplanStage::choosePlanForSubqueries(PlanYieldPolicy* yieldPolicy) {
return tagStatus;
}
} else if (1 == branchResult->solutions.size()) {
- QuerySolution* soln = branchResult->solutions.front();
+ QuerySolution* soln = branchResult->solutions.front().get();
Status tagStatus = tagOrChildAccordingToCache(
cacheData.get(), soln->cacheData.get(), orChild, _indexMap);
if (!tagStatus.isOK()) {
@@ -342,7 +348,7 @@ Status SubplanStage::choosePlanForSubqueries(PlanYieldPolicy* yieldPolicy) {
&nextPlanRoot));
// Takes ownership of solution with index 'ix' and 'nextPlanRoot'.
- multiPlanStage->addPlan(branchResult->solutions.releaseAt(ix), nextPlanRoot, _ws);
+ multiPlanStage->addPlan(branchResult->solutions[ix].release(), nextPlanRoot, _ws);
}
Status planSelectStat = multiPlanStage->pickBestPlan(yieldPolicy);
@@ -433,14 +439,14 @@ Status SubplanStage::choosePlanWholeQuery(PlanYieldPolicy* yieldPolicy) {
// Use the query planning module to plan the whole query.
std::vector<QuerySolution*> rawSolutions;
Status status = QueryPlanner::plan(*_query, _plannerParams, &rawSolutions);
+ std::vector<std::unique_ptr<QuerySolution>> solutions =
+ transitional_tools_do_not_use::spool_vector(rawSolutions);
if (!status.isOK()) {
return Status(ErrorCodes::BadValue,
"error processing query: " + _query->toString() +
" planner returned error: " + status.reason());
}
- OwnedPointerVector<QuerySolution> solutions(rawSolutions);
-
// We cannot figure out how to answer the query. Perhaps it requires an index
// we do not have?
if (0 == solutions.size()) {
@@ -457,7 +463,8 @@ Status SubplanStage::choosePlanWholeQuery(PlanYieldPolicy* yieldPolicy) {
_children.emplace_back(root);
// This SubplanStage takes ownership of the query solution.
- _compositeSolution.reset(solutions.popAndReleaseBack());
+ _compositeSolution = std::move(solutions.back());
+ solutions.pop_back();
return Status::OK();
} else {
@@ -478,7 +485,7 @@ Status SubplanStage::choosePlanWholeQuery(PlanYieldPolicy* yieldPolicy) {
getOpCtx(), _collection, *_query, *solutions[ix], _ws, &nextPlanRoot));
// Takes ownership of 'solutions[ix]' and 'nextPlanRoot'.
- multiPlanStage->addPlan(solutions.releaseAt(ix), nextPlanRoot, _ws);
+ multiPlanStage->addPlan(solutions[ix].release(), nextPlanRoot, _ws);
}
// Delegate the the MultiPlanStage's plan selection facility.
diff --git a/src/mongo/db/exec/subplan.h b/src/mongo/db/exec/subplan.h
index 38625a15276..d844e27472c 100644
--- a/src/mongo/db/exec/subplan.h
+++ b/src/mongo/db/exec/subplan.h
@@ -28,7 +28,9 @@
#pragma once
+#include <memory>
#include <string>
+#include <vector>
#include "mongo/base/owned_pointer_vector.h"
#include "mongo/base/status.h"
@@ -156,7 +158,7 @@ private:
std::unique_ptr<CachedSolution> cachedSolution;
// Query solutions resulting from planning the $or branch.
- OwnedPointerVector<QuerySolution> solutions;
+ std::vector<std::unique_ptr<QuerySolution>> solutions;
};
/**
@@ -203,7 +205,7 @@ private:
std::unique_ptr<QuerySolution> _compositeSolution;
// Holds a list of the results from planning each branch.
- OwnedPointerVector<BranchPlanningResult> _branchResults;
+ std::vector<std::unique_ptr<BranchPlanningResult>> _branchResults;
// We need this to extract cache-friendly index data from the index assignments.
std::map<StringData, size_t> _indexMap;
diff --git a/src/mongo/db/exec/update.cpp b/src/mongo/db/exec/update.cpp
index cced1c41fe6..1c67f79ba08 100644
--- a/src/mongo/db/exec/update.cpp
+++ b/src/mongo/db/exec/update.cpp
@@ -48,6 +48,7 @@
#include "mongo/stdx/memory.h"
#include "mongo/util/log.h"
#include "mongo/util/scopeguard.h"
+#include "mongo/util/transitional_tools_do_not_use/vector_spooling.h"
namespace mongo {
@@ -243,7 +244,7 @@ Status storageValidChildren(const mb::ConstElement& elem, const bool deep) {
inline Status validate(const BSONObj& original,
const FieldRefSet& updatedFields,
const mb::Document& updated,
- const std::vector<FieldRef*>* immutableAndSingleValueFields,
+ const std::vector<std::unique_ptr<FieldRef>>* immutableAndSingleValueFields,
const ModifierInterface::Options& opts) {
LOG(3) << "update validate options -- "
<< " updatedFields: " << updatedFields << " immutableAndSingleValueFields.size:"
@@ -267,14 +268,18 @@ inline Status validate(const BSONObj& original,
}
// Check all immutable fields
- if (immutableAndSingleValueFields)
- changedImmutableFields.fillFrom(*immutableAndSingleValueFields);
+ if (immutableAndSingleValueFields) {
+ changedImmutableFields.fillFrom(
+ transitional_tools_do_not_use::unspool_vector(*immutableAndSingleValueFields));
+ }
} else {
// TODO: Change impl so we don't need to create a new FieldRefSet
// -- move all conflict logic into static function on FieldRefSet?
FieldRefSet immutableFieldRef;
- if (immutableAndSingleValueFields)
- immutableFieldRef.fillFrom(*immutableAndSingleValueFields);
+ if (immutableAndSingleValueFields) {
+ immutableFieldRef.fillFrom(
+ transitional_tools_do_not_use::unspool_vector(*immutableAndSingleValueFields));
+ }
FieldRefSet::const_iterator where = updatedFields.begin();
const FieldRefSet::const_iterator end = updatedFields.end();
@@ -435,11 +440,11 @@ bool shouldRestartUpdateIfNoLongerMatches(const UpdateStageParams& params) {
return params.request->shouldReturnAnyDocs() && !params.request->getSort().isEmpty();
};
-const std::vector<FieldRef*>* getImmutableFields(OperationContext* opCtx,
- const NamespaceString& ns) {
+const std::vector<std::unique_ptr<FieldRef>>* getImmutableFields(OperationContext* opCtx,
+ const NamespaceString& ns) {
auto metadata = CollectionShardingState::get(opCtx, ns)->getMetadata();
if (metadata) {
- const std::vector<FieldRef*>& fields = metadata->getKeyPatternFields();
+ const std::vector<std::unique_ptr<FieldRef>>& fields = metadata->getKeyPatternFields();
// Return shard-keys as immutable for the update system.
return &fields;
}
@@ -558,7 +563,7 @@ BSONObj UpdateStage::transformAndUpdate(const Snapshotted<BSONObj>& oldObj, Reco
// Verify that no immutable fields were changed and data is valid for storage.
if (!(!getOpCtx()->writesAreReplicated() || request->isFromMigration())) {
- const std::vector<FieldRef*>* immutableFields = NULL;
+ const std::vector<std::unique_ptr<FieldRef>>* immutableFields = nullptr;
if (lifecycle)
immutableFields = getImmutableFields(getOpCtx(), request->getNamespaceString());
@@ -666,7 +671,7 @@ Status UpdateStage::applyUpdateOpsForInsert(OperationContext* opCtx,
driver->setLogOp(false);
driver->setContext(ModifierInterface::ExecInfo::INSERT_CONTEXT);
- const vector<FieldRef*>* immutablePaths = NULL;
+ const std::vector<std::unique_ptr<FieldRef>>* immutablePaths = nullptr;
if (!isInternalRequest)
immutablePaths = getImmutableFields(opCtx, ns);
@@ -674,7 +679,12 @@ Status UpdateStage::applyUpdateOpsForInsert(OperationContext* opCtx,
BSONObj original;
if (cq) {
- Status status = driver->populateDocumentWithQueryFields(*cq, immutablePaths, *doc);
+ std::vector<FieldRef*> fields;
+ if (immutablePaths) {
+ fields = transitional_tools_do_not_use::unspool_vector(*immutablePaths);
+ }
+
+ Status status = driver->populateDocumentWithQueryFields(*cq, &fields, *doc);
if (!status.isOK()) {
return status;
}
diff --git a/src/mongo/db/geo/big_polygon.cpp b/src/mongo/db/geo/big_polygon.cpp
index f50bdf1ae37..a83a59dd41c 100644
--- a/src/mongo/db/geo/big_polygon.cpp
+++ b/src/mongo/db/geo/big_polygon.cpp
@@ -31,7 +31,9 @@
#include <map>
#include "mongo/base/owned_pointer_vector.h"
+#include "mongo/stdx/memory.h"
#include "mongo/util/assert_util.h"
+#include "mongo/util/transitional_tools_do_not_use/vector_spooling.h"
namespace mongo {
@@ -90,16 +92,19 @@ bool BigSimplePolygon::Contains(const S2Polyline& line) const {
//
const S2Polygon& polyBorder = GetPolygonBorder();
- OwnedPointerVector<S2Polyline> clippedOwned;
- vector<S2Polyline*>& clipped = clippedOwned.mutableVector();
+ std::vector<S2Polyline*> clipped;
if (_isNormalized) {
// Polygon border is the same as the loop
polyBorder.SubtractFromPolyline(&line, &clipped);
+ const std::vector<std::unique_ptr<S2Polyline>> clippedOwned =
+ transitional_tools_do_not_use::spool_vector(clipped);
return clipped.size() == 0;
} else {
// Polygon border is the complement of the loop
polyBorder.IntersectWithPolyline(&line, &clipped);
+ const std::vector<std::unique_ptr<S2Polyline>> clippedOwned =
+ transitional_tools_do_not_use::spool_vector(clipped);
return clipped.size() == 0;
}
}
@@ -164,9 +169,9 @@ const S2Polygon& BigSimplePolygon::GetPolygonBorder() const {
// Any loop in polygon should be than a hemisphere (2*Pi).
cloned->Normalize();
- OwnedPointerVector<S2Loop> loops;
- loops.mutableVector().push_back(cloned.release());
- _borderPoly.reset(new S2Polygon(&loops.mutableVector()));
+ std::vector<S2Loop*> loops;
+ loops.push_back(cloned.release());
+ _borderPoly = stdx::make_unique<S2Polygon>(&loops);
return *_borderPoly;
}
diff --git a/src/mongo/db/geo/geometry_container.cpp b/src/mongo/db/geo/geometry_container.cpp
index 3ae63e40690..2b571f1b9f0 100644
--- a/src/mongo/db/geo/geometry_container.cpp
+++ b/src/mongo/db/geo/geometry_container.cpp
@@ -31,6 +31,7 @@
#include "mongo/db/geo/geoconstants.h"
#include "mongo/db/geo/geoparser.h"
#include "mongo/util/mongoutils/str.h"
+#include "mongo/util/transitional_tools_do_not_use/vector_spooling.h"
namespace mongo {
@@ -422,10 +423,11 @@ bool containsLine(const S2Polygon& poly, const S2Polyline& otherLine) {
// Kind of a mess. We get a function for clipping the line to the
// polygon. We do this and make sure the line is the same as the
// line we're clipping against.
- OwnedPointerVector<S2Polyline> clippedOwned;
- vector<S2Polyline*>& clipped = clippedOwned.mutableVector();
+ std::vector<S2Polyline*> clipped;
poly.IntersectWithPolyline(&otherLine, &clipped);
+ const std::vector<std::unique_ptr<S2Polyline>> clippedOwned =
+ transitional_tools_do_not_use::spool_vector(clipped);
if (1 != clipped.size()) {
return false;
}
diff --git a/src/mongo/db/geo/geoparser.cpp b/src/mongo/db/geo/geoparser.cpp
index 2be29d1dd7a..e5b09077bd5 100644
--- a/src/mongo/db/geo/geoparser.cpp
+++ b/src/mongo/db/geo/geoparser.cpp
@@ -31,14 +31,17 @@
#include "mongo/db/geo/geoparser.h"
#include <cmath>
+#include <memory>
#include <string>
#include <vector>
#include "mongo/db/bson/dotted_path_support.h"
#include "mongo/db/geo/shapes.h"
#include "mongo/db/jsobj.h"
+#include "mongo/stdx/memory.h"
#include "mongo/util/log.h"
#include "mongo/util/mongoutils/str.h"
+#include "mongo/util/transitional_tools_do_not_use/vector_spooling.h"
#include "third_party/s2/s2polygonbuilder.h"
#define BAD_VALUE(error) Status(ErrorCodes::BadValue, ::mongoutils::str::stream() << error)
@@ -180,7 +183,7 @@ static Status parseGeoJSONPolygonCoordinates(const BSONElement& elem,
return BAD_VALUE("Polygon coordinates must be an array");
}
- OwnedPointerVector<S2Loop> loops;
+ std::vector<std::unique_ptr<S2Loop>> loops;
Status status = Status::OK();
string err;
@@ -209,8 +212,8 @@ static Status parseGeoJSONPolygonCoordinates(const BSONElement& elem,
"Loop must have at least 3 different vertices: " << coordinateElt.toString(false));
}
- S2Loop* loop = new S2Loop(points);
- loops.push_back(loop);
+ loops.push_back(stdx::make_unique<S2Loop>(points));
+ S2Loop* loop = loops.back().get();
// Check whether this loop is valid.
// 1. At least 3 vertices.
@@ -239,18 +242,23 @@ static Status parseGeoJSONPolygonCoordinates(const BSONElement& elem,
return BAD_VALUE("Polygon has no loops.");
}
+
// Check if the given loops form a valid polygon.
// 1. If a loop contains an edge AB, then no other loop may contain AB or BA.
// 2. No loop covers more than half of the sphere.
// 3. No two loops cross.
- if (!skipValidation && !S2Polygon::IsValid(loops.vector(), &err))
+ if (!skipValidation &&
+ !S2Polygon::IsValid(transitional_tools_do_not_use::unspool_vector(loops), &err))
return BAD_VALUE("Polygon isn't valid: " << err << " " << elem.toString(false));
// Given all loops are valid / normalized and S2Polygon::IsValid() above returns true.
// The polygon must be valid. See S2Polygon member function IsValid().
- // Transfer ownership of the loops and clears loop vector.
- out->Init(&loops.mutableVector());
+ {
+ // Transfer ownership of the loops and clears loop vector.
+ std::vector<S2Loop*> rawLoops = transitional_tools_do_not_use::leak_vector(loops);
+ out->Init(&rawLoops);
+ }
if (skipValidation)
return Status::OK();
diff --git a/src/mongo/db/matcher/expression_tree_test.cpp b/src/mongo/db/matcher/expression_tree_test.cpp
index 1d8764d31b1..6b65006cdd1 100644
--- a/src/mongo/db/matcher/expression_tree_test.cpp
+++ b/src/mongo/db/matcher/expression_tree_test.cpp
@@ -115,7 +115,7 @@ TEST( AndOp, MatchesElementSingleClause ) {
BSONObj notMatch = BSON( "a" << 5 );
unique_ptr<ComparisonMatchExpression> lt( new ComparisonMatchExpression() );
ASSERT( lt->init( "", baseOperand[ "$lt" ] ).isOK() );
- OwnedPointerVector<MatchMatchExpression> subMatchExpressions;
+ std::vector<std::unique_ptr<<MatchMatchExpression>> subMatchExpressions;
subMatchExpressions.mutableVector().push_back( lt.release() );
AndOp andOp;
ASSERT( andOp.init( &subMatchExpressions ).isOK() );
@@ -237,7 +237,7 @@ TEST( AndOp, MatchesIndexKeyWithoutUnknown ) {
ASSERT( sub1->init( "a", baseOperand1[ "$gt" ] ).isOK() );
unique_ptr<ComparisonMatchExpression> sub2( new ComparisonMatchExpression() );
ASSERT( sub2->init( "a", baseOperand2[ "$lt" ] ).isOK() );
- OwnedPointerVector<MatchMatchExpression> subMatchExpressions;
+ std::vector<std::unique_ptr<MatchMatchExpression>> subMatchExpressions;
subMatchExpressions.mutableVector().push_back( sub1.release() );
subMatchExpressions.mutableVector().push_back( sub2.release() );
AndOp andOp;
@@ -262,7 +262,7 @@ TEST( AndOp, MatchesIndexKeyWithUnknown ) {
ASSERT( sub2->init( "a", baseOperand2[ "$lt" ] ).isOK() );
unique_ptr<NeOp> sub3( new NeOp() );
ASSERT( sub3->init( "a", baseOperand3[ "$ne" ] ).isOK() );
- OwnedPointerVector<MatchMatchExpression> subMatchExpressions;
+ std::vector<std::unique_ptr<MatchMatchExpression>> subMatchExpressions;
subMatchExpressions.mutableVector().push_back( sub1.release() );
subMatchExpressions.mutableVector().push_back( sub2.release() );
subMatchExpressions.mutableVector().push_back( sub3.release() );
@@ -285,7 +285,7 @@ TEST( OrOp, MatchesElementSingleClause ) {
BSONObj notMatch = BSON( "a" << 5 );
unique_ptr<ComparisonMatchExpression> lt( new ComparisonMatchExpression() );
ASSERT( lt->init( "a", baseOperand[ "$lt" ] ).isOK() );
- OwnedPointerVector<MatchMatchExpression> subMatchExpressions;
+ std::vector<std::unique_ptr<MatchMatchExpression>> subMatchExpressions;
subMatchExpressions.mutableVector().push_back( lt.release() );
OrOp orOp;
ASSERT( orOp.init( &subMatchExpressions ).isOK() );
@@ -313,7 +313,7 @@ TEST( OrOp, MatchesElementThreeClauses ) {
ASSERT( sub2->init( "a", baseOperand2[ "$gt" ] ).isOK() );
unique_ptr<ComparisonMatchExpression> sub3( new ComparisonMatchExpression() );
ASSERT( sub3->init( "a", baseOperand3[ "a" ] ).isOK() );
- OwnedPointerVector<MatchMatchExpression> subMatchExpressions;
+ std::vector<std::unique_ptr<MatchMatchExpression>> subMatchExpressions;
subMatchExpressions.mutableVector().push_back( sub1.release() );
subMatchExpressions.mutableVector().push_back( sub2.release() );
subMatchExpressions.mutableVector().push_back( sub3.release() );
@@ -397,7 +397,7 @@ TEST( OrOp, MatchesIndexKeyWithoutUnknown ) {
ASSERT( sub1->init( "a", baseOperand1[ "$gt" ] ).isOK() );
unique_ptr<ComparisonMatchExpression> sub2( new ComparisonMatchExpression() );
ASSERT( sub2->init( "a", baseOperand2[ "$lt" ] ).isOK() );
- OwnedPointerVector<MatchMatchExpression> subMatchExpressions;
+ std::vector<std::unique_ptr<MatchMatchExpression>> subMatchExpressions;
subMatchExpressions.mutableVector().push_back( sub1.release() );
subMatchExpressions.mutableVector().push_back( sub2.release() );
OrOp orOp;
@@ -422,7 +422,7 @@ TEST( OrOp, MatchesIndexKeyWithUnknown ) {
ASSERT( sub2->init( "a", baseOperand2[ "$lt" ] ).isOK() );
unique_ptr<NeOp> sub3( new NeOp() );
ASSERT( sub3->init( "a", baseOperand3[ "$ne" ] ).isOK() );
- OwnedPointerVector<MatchMatchExpression> subMatchExpressions;
+ std::vector<std::unique_ptr<MatchMatchExpression>> subMatchExpressions;
subMatchExpressions.mutableVector().push_back( sub1.release() );
subMatchExpressions.mutableVector().push_back( sub2.release() );
subMatchExpressions.mutableVector().push_back( sub3.release() );
@@ -445,7 +445,7 @@ TEST( NorOp, MatchesElementSingleClause ) {
BSONObj notMatch = BSON( "a" << 4 );
unique_ptr<ComparisonMatchExpression> lt( new ComparisonMatchExpression() );
ASSERT( lt->init( "a", baseOperand[ "$lt" ] ).isOK() );
- OwnedPointerVector<MatchMatchExpression> subMatchExpressions;
+ std::vector<std::unique_ptr<MatchMatchExpression>> subMatchExpressions;
subMatchExpressions.mutableVector().push_back( lt.release() );
NorOp norOp;
ASSERT( norOp.init( &subMatchExpressions ).isOK() );
@@ -473,7 +473,7 @@ TEST( NorOp, MatchesElementThreeClauses ) {
ASSERT( sub2->init( "a", baseOperand2[ "$gt" ] ).isOK() );
unique_ptr<ComparisonMatchExpression> sub3( new ComparisonMatchExpression() );
ASSERT( sub3->init( "a", baseOperand3[ "a" ] ).isOK() );
- OwnedPointerVector<MatchMatchExpression> subMatchExpressions;
+ std::vector<std::unique_ptr<MatchMatchExpression>> subMatchExpressions;
subMatchExpressions.mutableVector().push_back( sub1.release() );
subMatchExpressions.mutableVector().push_back( sub2.release() );
subMatchExpressions.mutableVector().push_back( sub3.release() );
diff --git a/src/mongo/db/ops/path_support_test.cpp b/src/mongo/db/ops/path_support_test.cpp
index d514beb5f56..527384ab7bb 100644
--- a/src/mongo/db/ops/path_support_test.cpp
+++ b/src/mongo/db/ops/path_support_test.cpp
@@ -29,7 +29,9 @@
#include "mongo/db/ops/path_support.h"
#include <cstdint>
+#include <memory>
#include <string>
+#include <vector>
#include "mongo/base/error_codes.h"
#include "mongo/base/owned_pointer_vector.h"
@@ -47,6 +49,7 @@
#include "mongo/db/matcher/expression_leaf.h"
#include "mongo/db/matcher/expression_parser.h"
#include "mongo/db/matcher/extensions_callback_disallow_extensions.h"
+#include "mongo/stdx/memory.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/mongoutils/str.h"
@@ -659,9 +662,9 @@ public:
ImmutablePaths() {}
void addPath(const string& path) {
- _ownedPaths.mutableVector().push_back(new FieldRef(path));
+ _ownedPaths.push_back(stdx::make_unique<FieldRef>(path));
FieldRef const* conflictPath = NULL;
- ASSERT(_immutablePathSet.insert(_ownedPaths.vector().back(), &conflictPath));
+ ASSERT(_immutablePathSet.insert(_ownedPaths.back().get(), &conflictPath));
}
const FieldRefSet& getPathSet() {
@@ -670,7 +673,7 @@ public:
private:
FieldRefSet _immutablePathSet;
- OwnedPointerVector<FieldRef> _ownedPaths;
+ std::vector<std::unique_ptr<FieldRef>> _ownedPaths;
};
TEST(ExtractEqualities, IdOnlyMulti) {
diff --git a/src/mongo/db/query/plan_cache.cpp b/src/mongo/db/query/plan_cache.cpp
index debb7c80fdf..0f0c885b37d 100644
--- a/src/mongo/db/query/plan_cache.cpp
+++ b/src/mongo/db/query/plan_cache.cpp
@@ -32,6 +32,11 @@
#include "mongo/db/query/plan_cache.h"
+#include <algorithm>
+#include <math.h>
+#include <memory>
+#include <vector>
+
#include "mongo/base/owned_pointer_vector.h"
#include "mongo/client/dbclientinterface.h" // For QueryOption_foobar
#include "mongo/db/matcher/expression_array.h"
@@ -43,9 +48,7 @@
#include "mongo/util/assert_util.h"
#include "mongo/util/log.h"
#include "mongo/util/mongoutils/str.h"
-#include <algorithm>
-#include <math.h>
-#include <memory>
+#include "mongo/util/transitional_tools_do_not_use/vector_spooling.h"
namespace mongo {
namespace {
@@ -361,13 +364,14 @@ PlanCacheEntry::~PlanCacheEntry() {
}
PlanCacheEntry* PlanCacheEntry::clone() const {
- OwnedPointerVector<QuerySolution> solutions;
+ std::vector<std::unique_ptr<QuerySolution>> solutions;
for (size_t i = 0; i < plannerData.size(); ++i) {
- QuerySolution* qs = new QuerySolution();
+ auto qs = stdx::make_unique<QuerySolution>();
qs->cacheData.reset(plannerData[i]->clone());
- solutions.mutableVector().push_back(qs);
+ solutions.push_back(std::move(qs));
}
- PlanCacheEntry* entry = new PlanCacheEntry(solutions.vector(), decision->clone());
+ PlanCacheEntry* entry = new PlanCacheEntry(
+ transitional_tools_do_not_use::unspool_vector(solutions), decision->clone());
// Copy query shape.
entry->query = query.getOwned();
diff --git a/src/mongo/db/query/plan_cache_test.cpp b/src/mongo/db/query/plan_cache_test.cpp
index 5672f6cc1af..dd5eead802c 100644
--- a/src/mongo/db/query/plan_cache_test.cpp
+++ b/src/mongo/db/query/plan_cache_test.cpp
@@ -49,6 +49,7 @@
#include "mongo/unittest/unittest.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/scopeguard.h"
+#include "mongo/util/transitional_tools_do_not_use/vector_spooling.h"
using namespace mongo;
@@ -218,9 +219,9 @@ PlanRankingDecision* createDecision(size_t numPlans) {
unique_ptr<PlanRankingDecision> why(new PlanRankingDecision());
for (size_t i = 0; i < numPlans; ++i) {
CommonStats common("COLLSCAN");
- unique_ptr<PlanStageStats> stats(new PlanStageStats(common, STAGE_COLLSCAN));
+ auto stats = stdx::make_unique<PlanStageStats>(common, STAGE_COLLSCAN);
stats->specific.reset(new CollectionScanStats());
- why->stats.mutableVector().push_back(stats.release());
+ why->stats.push_back(std::move(stats));
why->scores.push_back(0U);
why->candidateOrder.push_back(i);
}
diff --git a/src/mongo/db/query/plan_ranker.cpp b/src/mongo/db/query/plan_ranker.cpp
index 964d3448576..c5c9436c2de 100644
--- a/src/mongo/db/query/plan_ranker.cpp
+++ b/src/mongo/db/query/plan_ranker.cpp
@@ -76,14 +76,14 @@ size_t PlanRanker::pickBestPlan(const vector<CandidatePlan>& candidates, PlanRan
double eofBonus = 1.0;
// Each plan will have a stat tree.
- vector<PlanStageStats*> statTrees;
+ std::vector<std::unique_ptr<PlanStageStats>> statTrees;
// Get stat trees from each plan.
// Copy stats trees instead of transferring ownership
// because multi plan runner will need its own stats
// trees for explain.
for (size_t i = 0; i < candidates.size(); ++i) {
- statTrees.push_back(candidates[i].root->getStats().release());
+ statTrees.push_back(candidates[i].root->getStats());
}
// Holds (score, candidateInndex).
@@ -98,7 +98,7 @@ size_t PlanRanker::pickBestPlan(const vector<CandidatePlan>& candidates, PlanRan
LOG(2) << "Scoring query plan: " << redact(Explain::getPlanSummary(candidates[i].root))
<< " planHitEOF=" << statTrees[i]->common.isEOF;
- double score = scoreTree(statTrees[i]);
+ double score = scoreTree(statTrees[i].get());
LOG(5) << "score = " << score;
if (statTrees[i]->common.isEOF) {
LOG(5) << "Adding +" << eofBonus << " EOF bonus to score.";
@@ -151,7 +151,7 @@ size_t PlanRanker::pickBestPlan(const vector<CandidatePlan>& candidates, PlanRan
score -= eofBonus;
}
- why->stats.mutableVector().push_back(statTrees[candidateIndex]);
+ why->stats.push_back(std::move(statTrees[candidateIndex]));
why->scores.push_back(score);
why->candidateOrder.push_back(candidateIndex);
}
diff --git a/src/mongo/db/query/plan_ranker.h b/src/mongo/db/query/plan_ranker.h
index d01539b847e..30413a37e78 100644
--- a/src/mongo/db/query/plan_ranker.h
+++ b/src/mongo/db/query/plan_ranker.h
@@ -95,9 +95,9 @@ struct PlanRankingDecision {
PlanRankingDecision* clone() const {
PlanRankingDecision* decision = new PlanRankingDecision();
for (size_t i = 0; i < stats.size(); ++i) {
- PlanStageStats* s = stats.vector()[i];
+ PlanStageStats* s = stats[i].get();
invariant(s);
- decision->stats.mutableVector().push_back(s->clone());
+ decision->stats.push_back(std::unique_ptr<PlanStageStats>{s->clone()});
}
decision->scores = scores;
decision->candidateOrder = candidateOrder;
@@ -106,7 +106,7 @@ struct PlanRankingDecision {
// Stats of all plans sorted in descending order by score.
// Owned by us.
- OwnedPointerVector<PlanStageStats> stats;
+ std::vector<std::unique_ptr<PlanStageStats>> stats;
// The "goodness" score corresponding to 'stats'.
// Sorted in descending order.
diff --git a/src/mongo/db/query/planner_access.cpp b/src/mongo/db/query/planner_access.cpp
index c21ea6b5d2a..31b90ea6a09 100644
--- a/src/mongo/db/query/planner_access.cpp
+++ b/src/mongo/db/query/planner_access.cpp
@@ -33,6 +33,7 @@
#include "mongo/db/query/planner_access.h"
#include <algorithm>
+#include <memory>
#include <vector>
#include "mongo/base/owned_pointer_vector.h"
@@ -49,6 +50,7 @@
#include "mongo/db/query/query_planner_common.h"
#include "mongo/stdx/memory.h"
#include "mongo/util/log.h"
+#include "mongo/util/transitional_tools_do_not_use/vector_spooling.h"
namespace {
@@ -647,22 +649,23 @@ void QueryPlannerAccess::findElemMatchChildren(const MatchExpression* node,
// static
std::vector<QuerySolutionNode*> QueryPlannerAccess::collapseEquivalentScans(
const std::vector<QuerySolutionNode*> scans) {
- OwnedPointerVector<QuerySolutionNode> ownedScans(scans);
+ std::vector<std::unique_ptr<QuerySolutionNode>> ownedScans =
+ transitional_tools_do_not_use::spool_vector(scans);
invariant(ownedScans.size() > 0);
// Scans that need to be collapsed will be adjacent to each other in the list due to how we
// sort the query predicate. We step through the list, either merging the current scan into
// the last scan in 'collapsedScans', or adding a new entry to 'collapsedScans' if it can't
// be merged.
- OwnedPointerVector<QuerySolutionNode> collapsedScans;
+ std::vector<std::unique_ptr<QuerySolutionNode>> collapsedScans;
- collapsedScans.push_back(ownedScans.releaseAt(0));
+ collapsedScans.push_back(std::move(ownedScans[0]));
for (size_t i = 1; i < ownedScans.size(); ++i) {
- if (scansAreEquivalent(collapsedScans.back(), ownedScans[i])) {
+ if (scansAreEquivalent(collapsedScans.back().get(), ownedScans[i].get())) {
// We collapse the entry from 'ownedScans' into the back of 'collapsedScans'.
- std::unique_ptr<QuerySolutionNode> collapseFrom(ownedScans.releaseAt(i));
+ std::unique_ptr<QuerySolutionNode> collapseFrom(std::move(ownedScans[i]));
FetchNode* collapseFromFetch = getFetchNode(collapseFrom.get());
- FetchNode* collapseIntoFetch = getFetchNode(collapsedScans.back());
+ FetchNode* collapseIntoFetch = getFetchNode(collapsedScans.back().get());
// If there's no filter associated with a fetch node on 'collapseFrom', all we have to
// do is clear the filter on the node that we are collapsing into.
@@ -691,12 +694,12 @@ std::vector<QuerySolutionNode*> QueryPlannerAccess::collapseEquivalentScans(
CanonicalQuery::normalizeTree(collapsedFilter.release()));
} else {
// Scans are not equivalent and can't be collapsed.
- collapsedScans.push_back(ownedScans.releaseAt(i));
+ collapsedScans.push_back(std::move(ownedScans[i]));
}
}
invariant(collapsedScans.size() > 0);
- return collapsedScans.release();
+ return transitional_tools_do_not_use::leak_vector(collapsedScans);
}
// static
diff --git a/src/mongo/db/query/query_planner_test.cpp b/src/mongo/db/query/query_planner_test.cpp
index 9e2bd34e292..2f1102e9e21 100644
--- a/src/mongo/db/query/query_planner_test.cpp
+++ b/src/mongo/db/query/query_planner_test.cpp
@@ -92,8 +92,8 @@ TEST_F(QueryPlannerTest, IndexFilterAppliedDefault) {
assertSolutionExists("{fetch: {filter: null, node: {ixscan: {pattern: {x: 1}}}}}");
// Check indexFilterApplied in query solutions;
- for (std::vector<QuerySolution*>::const_iterator it = solns.begin(); it != solns.end(); ++it) {
- QuerySolution* soln = *it;
+ for (auto it = solns.begin(); it != solns.end(); ++it) {
+ QuerySolution* soln = it->get();
ASSERT_FALSE(soln->indexFilterApplied);
}
}
@@ -110,8 +110,8 @@ TEST_F(QueryPlannerTest, IndexFilterAppliedTrue) {
assertSolutionExists("{fetch: {filter: null, node: {ixscan: {pattern: {x: 1}}}}}");
// Check indexFilterApplied in query solutions;
- for (std::vector<QuerySolution*>::const_iterator it = solns.begin(); it != solns.end(); ++it) {
- QuerySolution* soln = *it;
+ for (auto it = solns.begin(); it != solns.end(); ++it) {
+ QuerySolution* soln = it->get();
ASSERT_EQUALS(params.indexFiltersApplied, soln->indexFilterApplied);
}
}
diff --git a/src/mongo/db/query/query_planner_test_fixture.cpp b/src/mongo/db/query/query_planner_test_fixture.cpp
index 5fc3a6817f2..3376d24eb04 100644
--- a/src/mongo/db/query/query_planner_test_fixture.cpp
+++ b/src/mongo/db/query/query_planner_test_fixture.cpp
@@ -42,6 +42,7 @@
#include "mongo/db/query/query_planner.h"
#include "mongo/db/query/query_planner_test_lib.h"
#include "mongo/util/log.h"
+#include "mongo/util/transitional_tools_do_not_use/vector_spooling.h"
namespace mongo {
@@ -253,7 +254,9 @@ void QueryPlannerTest::runQueryFull(const BSONObj& query,
ASSERT_OK(statusWithCQ.getStatus());
cq = std::move(statusWithCQ.getValue());
- ASSERT_OK(QueryPlanner::plan(*cq, params, &solns.mutableVector()));
+ std::vector<QuerySolution*> solnsRaw;
+ ASSERT_OK(QueryPlanner::plan(*cq, params, &solnsRaw));
+ solns = transitional_tools_do_not_use::spool_vector(solnsRaw);
}
void QueryPlannerTest::runInvalidQuery(const BSONObj& query) {
@@ -330,7 +333,9 @@ void QueryPlannerTest::runInvalidQueryFull(const BSONObj& query,
ASSERT_OK(statusWithCQ.getStatus());
cq = std::move(statusWithCQ.getValue());
- Status s = QueryPlanner::plan(*cq, params, &solns.mutableVector());
+ std::vector<QuerySolution*> solnsRaw;
+ Status s = QueryPlanner::plan(*cq, params, &solnsRaw);
+ solns = transitional_tools_do_not_use::spool_vector(solnsRaw);
ASSERT_NOT_OK(s);
}
@@ -349,7 +354,9 @@ void QueryPlannerTest::runQueryAsCommand(const BSONObj& cmdObj) {
ASSERT_OK(statusWithCQ.getStatus());
cq = std::move(statusWithCQ.getValue());
- Status s = QueryPlanner::plan(*cq, params, &solns.mutableVector());
+ std::vector<QuerySolution*> solnsRaw;
+ Status s = QueryPlanner::plan(*cq, params, &solnsRaw);
+ solns = transitional_tools_do_not_use::spool_vector(solnsRaw);
ASSERT_OK(s);
}
@@ -368,7 +375,9 @@ void QueryPlannerTest::runInvalidQueryAsCommand(const BSONObj& cmdObj) {
ASSERT_OK(statusWithCQ.getStatus());
cq = std::move(statusWithCQ.getValue());
- Status status = QueryPlanner::plan(*cq, params, &solns.mutableVector());
+ std::vector<QuerySolution*> solnsRaw;
+ Status status = QueryPlanner::plan(*cq, params, &solnsRaw);
+ solns = transitional_tools_do_not_use::spool_vector(solnsRaw);
ASSERT_NOT_OK(status);
}
diff --git a/src/mongo/db/query/query_planner_test_fixture.h b/src/mongo/db/query/query_planner_test_fixture.h
index 6d2384ed8fc..ef7b7f30d7f 100644
--- a/src/mongo/db/query/query_planner_test_fixture.h
+++ b/src/mongo/db/query/query_planner_test_fixture.h
@@ -212,7 +212,7 @@ protected:
BSONObj queryObj;
std::unique_ptr<CanonicalQuery> cq;
QueryPlannerParams params;
- OwnedPointerVector<QuerySolution> solns;
+ std::vector<std::unique_ptr<QuerySolution>> solns;
};
} // namespace mongo
diff --git a/src/mongo/db/range_deleter.cpp b/src/mongo/db/range_deleter.cpp
index e3a737600d7..f26ceec85c8 100644
--- a/src/mongo/db/range_deleter.cpp
+++ b/src/mongo/db/range_deleter.cpp
@@ -39,6 +39,7 @@
#include "mongo/db/repl/replication_coordinator_global.h"
#include "mongo/db/service_context.h"
#include "mongo/db/write_concern_options.h"
+#include "mongo/stdx/memory.h"
#include "mongo/util/exit.h"
#include "mongo/util/log.h"
#include "mongo/util/mongoutils/str.h"
@@ -378,16 +379,17 @@ bool RangeDeleter::deleteNow(OperationContext* opCtx,
return result;
}
-void RangeDeleter::getStatsHistory(std::vector<DeleteJobStats*>* stats) const {
- stats->clear();
- stats->reserve(kDeleteJobsHistory);
+std::vector<std::unique_ptr<DeleteJobStats>> RangeDeleter::getStatsHistory() const {
+ std::vector<std::unique_ptr<DeleteJobStats>> stats;
+ stats.reserve(kDeleteJobsHistory);
stdx::lock_guard<stdx::mutex> sl(_statsHistoryMutex);
for (std::deque<DeleteJobStats*>::const_iterator it = _statsHistory.begin();
it != _statsHistory.end();
++it) {
- stats->push_back(new DeleteJobStats(**it));
+ stats.push_back(stdx::make_unique<DeleteJobStats>(**it));
}
+ return stats;
}
BSONObj RangeDeleter::toBSON() const {
diff --git a/src/mongo/db/range_deleter.h b/src/mongo/db/range_deleter.h
index 84852b4960f..c15b07df1af 100644
--- a/src/mongo/db/range_deleter.h
+++ b/src/mongo/db/range_deleter.h
@@ -157,7 +157,7 @@ public:
//
// Note: original contents of stats will be cleared. Caller owns the returned stats.
- void getStatsHistory(std::vector<DeleteJobStats*>* stats) const;
+ std::vector<std::unique_ptr<DeleteJobStats>> getStatsHistory() const;
size_t getTotalDeletes() const;
size_t getPendingDeletes() const;
diff --git a/src/mongo/db/s/collection_metadata.cpp b/src/mongo/db/s/collection_metadata.cpp
index b2c19420643..e3c95a06e93 100644
--- a/src/mongo/db/s/collection_metadata.cpp
+++ b/src/mongo/db/s/collection_metadata.cpp
@@ -455,8 +455,8 @@ void CollectionMetadata::fillKeyPatternFields() {
while (patternIter.more()) {
BSONElement current = patternIter.next();
- _keyFields.mutableVector().push_back(new FieldRef);
- FieldRef* const newFieldRef = _keyFields.mutableVector().back();
+ _keyFields.push_back(stdx::make_unique<FieldRef>());
+ FieldRef* const newFieldRef = _keyFields.back().get();
newFieldRef->parse(current.fieldNameStringData());
}
}
diff --git a/src/mongo/db/s/collection_metadata.h b/src/mongo/db/s/collection_metadata.h
index a8c8d54de20..8d3dec9e6ef 100644
--- a/src/mongo/db/s/collection_metadata.h
+++ b/src/mongo/db/s/collection_metadata.h
@@ -152,8 +152,8 @@ public:
return _keyPattern;
}
- const std::vector<FieldRef*>& getKeyPatternFields() const {
- return _keyFields.vector();
+ const std::vector<std::unique_ptr<FieldRef>>& getKeyPatternFields() const {
+ return _keyFields;
}
BSONObj getMinKey() const;
@@ -225,7 +225,7 @@ private:
BSONObj _keyPattern;
// A vector owning the FieldRefs parsed from the shard-key pattern of field names.
- OwnedPointerVector<FieldRef> _keyFields;
+ std::vector<std::unique_ptr<FieldRef>> _keyFields;
//
// RangeMaps represent chunks by mapping the min key to the chunk's max key, allowing
diff --git a/src/mongo/db/stats/range_deleter_server_status.cpp b/src/mongo/db/stats/range_deleter_server_status.cpp
index 1c62a30fef5..43e919d160a 100644
--- a/src/mongo/db/stats/range_deleter_server_status.cpp
+++ b/src/mongo/db/stats/range_deleter_server_status.cpp
@@ -66,12 +66,9 @@ public:
BSONObjBuilder result;
- OwnedPointerVector<DeleteJobStats> statsList;
- deleter->getStatsHistory(&statsList.mutableVector());
+ auto statsList = deleter->getStatsHistory();
BSONArrayBuilder oldStatsBuilder;
- for (OwnedPointerVector<DeleteJobStats>::const_iterator it = statsList.begin();
- it != statsList.end();
- ++it) {
+ for (auto it = statsList.begin(); it != statsList.end(); ++it) {
BSONObjBuilder entryBuilder;
entryBuilder.append("deletedDocs", (*it)->deletedDocCount);
diff --git a/src/mongo/db/storage/key_string_test.cpp b/src/mongo/db/storage/key_string_test.cpp
index 01432714ce0..d4a3c5cf348 100644
--- a/src/mongo/db/storage/key_string_test.cpp
+++ b/src/mongo/db/storage/key_string_test.cpp
@@ -35,6 +35,7 @@
#include <algorithm>
#include <cmath>
#include <limits>
+#include <memory>
#include <random>
#include <typeinfo>
#include <vector>
@@ -48,6 +49,7 @@
#include "mongo/platform/decimal128.h"
#include "mongo/stdx/functional.h"
#include "mongo/stdx/future.h"
+#include "mongo/stdx/memory.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/hex.h"
#include "mongo/util/log.h"
@@ -1000,9 +1002,9 @@ TEST_F(KeyStringTest, NumberOrderLots) {
Ordering ordering = Ordering::make(BSON("a" << 1));
- OwnedPointerVector<KeyString> keyStrings;
+ std::vector<std::unique_ptr<KeyString>> keyStrings;
for (size_t i = 0; i < numbers.size(); i++) {
- keyStrings.push_back(new KeyString(version, numbers[i], ordering));
+ keyStrings.push_back(stdx::make_unique<KeyString>(version, numbers[i], ordering));
}
for (size_t i = 0; i < numbers.size(); i++) {
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp
index 9843d46215c..9fcd21abd3e 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp
@@ -103,7 +103,7 @@ void WiredTigerRecoveryUnit::_abort() {
for (Changes::const_reverse_iterator it = _changes.rbegin(), end = _changes.rend();
it != end;
++it) {
- Change* change = *it;
+ Change* change = it->get();
LOG(2) << "CUSTOM ROLLBACK " << redact(demangleName(typeid(*change)));
change->rollback();
}
@@ -148,7 +148,7 @@ bool WiredTigerRecoveryUnit::waitUntilDurable() {
void WiredTigerRecoveryUnit::registerChange(Change* change) {
invariant(_inUnitOfWork);
- _changes.push_back(change);
+ _changes.push_back(std::unique_ptr<Change>{change});
}
void WiredTigerRecoveryUnit::assertInActiveTxn() const {
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h
index 0c9a8c61166..a3c1df67773 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h
@@ -34,6 +34,9 @@
#include <memory.h>
+#include <memory>
+#include <vector>
+
#include "mongo/base/checked_cast.h"
#include "mongo/base/owned_pointer_vector.h"
#include "mongo/db/operation_context.h"
@@ -134,7 +137,7 @@ private:
SnapshotName _majorityCommittedSnapshot = SnapshotName::min();
std::unique_ptr<Timer> _timer;
- typedef OwnedPointerVector<Change> Changes;
+ typedef std::vector<std::unique_ptr<Change>> Changes;
Changes _changes;
};
diff --git a/src/mongo/dbtests/query_stage_near.cpp b/src/mongo/dbtests/query_stage_near.cpp
index 8b3c02fb540..ea8ef9e45ec 100644
--- a/src/mongo/dbtests/query_stage_near.cpp
+++ b/src/mongo/dbtests/query_stage_near.cpp
@@ -33,6 +33,9 @@
#include "mongo/platform/basic.h"
+#include <memory>
+#include <vector>
+
#include "mongo/base/owned_pointer_vector.h"
#include "mongo/db/client.h"
#include "mongo/db/exec/near.h"
@@ -75,7 +78,7 @@ public:
_pos(0) {}
void addInterval(vector<BSONObj> data, double min, double max) {
- _intervals.mutableVector().push_back(new MockInterval(data, min, max));
+ _intervals.push_back(stdx::make_unique<MockInterval>(data, min, max));
}
virtual StatusWith<CoveredInterval*> nextInterval(OperationContext* opCtx,
@@ -84,9 +87,9 @@ public:
if (_pos == static_cast<int>(_intervals.size()))
return StatusWith<CoveredInterval*>(NULL);
- const MockInterval& interval = *_intervals.vector()[_pos++];
+ const MockInterval& interval = *_intervals[_pos++];
- bool lastInterval = _pos == static_cast<int>(_intervals.vector().size());
+ bool lastInterval = _pos == static_cast<int>(_intervals.size());
auto queuedStage = make_unique<QueuedDataStage>(opCtx, workingSet);
@@ -117,7 +120,7 @@ public:
}
private:
- OwnedPointerVector<MockInterval> _intervals;
+ std::vector<std::unique_ptr<MockInterval>> _intervals;
int _pos;
};
diff --git a/src/mongo/s/commands/chunk_manager_targeter.cpp b/src/mongo/s/commands/chunk_manager_targeter.cpp
index 8296b0066a9..609dee87d9e 100644
--- a/src/mongo/s/commands/chunk_manager_targeter.cpp
+++ b/src/mongo/s/commands/chunk_manager_targeter.cpp
@@ -44,6 +44,7 @@
#include "mongo/s/grid.h"
#include "mongo/s/shard_key_pattern.h"
#include "mongo/s/sharding_raii.h"
+#include "mongo/stdx/memory.h"
#include "mongo/util/log.h"
#include "mongo/util/mongoutils/str.h"
@@ -349,9 +350,10 @@ Status ChunkManagerTargeter::targetInsert(OperationContext* opCtx,
return Status::OK();
}
-Status ChunkManagerTargeter::targetUpdate(OperationContext* opCtx,
- const BatchedUpdateDocument& updateDoc,
- vector<ShardEndpoint*>* endpoints) const {
+Status ChunkManagerTargeter::targetUpdate(
+ OperationContext* opCtx,
+ const BatchedUpdateDocument& updateDoc,
+ std::vector<std::unique_ptr<ShardEndpoint>>* endpoints) const {
//
// Update targeting may use either the query or the update. This is to support save-style
// updates, of the form:
@@ -419,8 +421,7 @@ Status ChunkManagerTargeter::targetUpdate(OperationContext* opCtx,
if (!shardKey.isEmpty()) {
try {
endpoints->push_back(
- targetShardKey(shardKey, collation, (query.objsize() + updateExpr.objsize()))
- .release());
+ targetShardKey(shardKey, collation, (query.objsize() + updateExpr.objsize())));
return Status::OK();
} catch (const DBException&) {
// This update is potentially not constrained to a single shard
@@ -473,9 +474,10 @@ Status ChunkManagerTargeter::targetUpdate(OperationContext* opCtx,
}
}
-Status ChunkManagerTargeter::targetDelete(OperationContext* opCtx,
- const BatchedDeleteDocument& deleteDoc,
- vector<ShardEndpoint*>* endpoints) const {
+Status ChunkManagerTargeter::targetDelete(
+ OperationContext* opCtx,
+ const BatchedDeleteDocument& deleteDoc,
+ std::vector<std::unique_ptr<ShardEndpoint>>* endpoints) const {
BSONObj shardKey;
if (_manager) {
@@ -502,7 +504,7 @@ Status ChunkManagerTargeter::targetDelete(OperationContext* opCtx,
// Target the shard key or delete query
if (!shardKey.isEmpty()) {
try {
- endpoints->push_back(targetShardKey(shardKey, collation, 0).release());
+ endpoints->push_back(targetShardKey(shardKey, collation, 0));
return Status::OK();
} catch (const DBException&) {
// This delete is potentially not constrained to a single shard
@@ -541,19 +543,21 @@ Status ChunkManagerTargeter::targetDelete(OperationContext* opCtx,
return targetQuery(opCtx, deleteDoc.getQuery(), collation, endpoints);
}
-Status ChunkManagerTargeter::targetDoc(OperationContext* opCtx,
- const BSONObj& doc,
- const BSONObj& collation,
- vector<ShardEndpoint*>* endpoints) const {
+Status ChunkManagerTargeter::targetDoc(
+ OperationContext* opCtx,
+ const BSONObj& doc,
+ const BSONObj& collation,
+ std::vector<std::unique_ptr<ShardEndpoint>>* endpoints) const {
// NOTE: This is weird and fragile, but it's the way our language works right now -
// documents are either A) invalid or B) valid equality queries over themselves.
return targetQuery(opCtx, doc, collation, endpoints);
}
-Status ChunkManagerTargeter::targetQuery(OperationContext* opCtx,
- const BSONObj& query,
- const BSONObj& collation,
- vector<ShardEndpoint*>* endpoints) const {
+Status ChunkManagerTargeter::targetQuery(
+ OperationContext* opCtx,
+ const BSONObj& query,
+ const BSONObj& collation,
+ std::vector<std::unique_ptr<ShardEndpoint>>* endpoints) const {
if (!_primary && !_manager) {
return Status(ErrorCodes::NamespaceNotFound,
stream() << "could not target query in " << getNS().ns()
@@ -572,7 +576,7 @@ Status ChunkManagerTargeter::targetQuery(OperationContext* opCtx,
}
for (const ShardId& shardId : shardIds) {
- endpoints->push_back(new ShardEndpoint(
+ endpoints->push_back(stdx::make_unique<ShardEndpoint>(
shardId, _manager ? _manager->getVersion(shardId) : ChunkVersion::UNSHARDED()));
}
@@ -594,7 +598,8 @@ std::unique_ptr<ShardEndpoint> ChunkManagerTargeter::targetShardKey(const BSONOb
_manager->getVersion(chunk->getShardId()));
}
-Status ChunkManagerTargeter::targetCollection(vector<ShardEndpoint*>* endpoints) const {
+Status ChunkManagerTargeter::targetCollection(
+ std::vector<std::unique_ptr<ShardEndpoint>>* endpoints) const {
if (!_primary && !_manager) {
return Status(ErrorCodes::NamespaceNotFound,
str::stream() << "could not target full range of " << getNS().ns()
@@ -609,14 +614,15 @@ Status ChunkManagerTargeter::targetCollection(vector<ShardEndpoint*>* endpoints)
}
for (const ShardId& shardId : shardIds) {
- endpoints->push_back(new ShardEndpoint(
+ endpoints->push_back(stdx::make_unique<ShardEndpoint>(
shardId, _manager ? _manager->getVersion(shardId) : ChunkVersion::UNSHARDED()));
}
return Status::OK();
}
-Status ChunkManagerTargeter::targetAllShards(vector<ShardEndpoint*>* endpoints) const {
+Status ChunkManagerTargeter::targetAllShards(
+ std::vector<std::unique_ptr<ShardEndpoint>>* endpoints) const {
if (!_primary && !_manager) {
return Status(ErrorCodes::NamespaceNotFound,
str::stream() << "could not target every shard with versions for "
@@ -628,7 +634,7 @@ Status ChunkManagerTargeter::targetAllShards(vector<ShardEndpoint*>* endpoints)
grid.shardRegistry()->getAllShardIds(&shardIds);
for (const ShardId& shardId : shardIds) {
- endpoints->push_back(new ShardEndpoint(
+ endpoints->push_back(stdx::make_unique<ShardEndpoint>(
shardId, _manager ? _manager->getVersion(shardId) : ChunkVersion::UNSHARDED()));
}
@@ -660,7 +666,8 @@ void ChunkManagerTargeter::noteStaleResponse(const ShardEndpoint& endpoint,
previouslyNotedVersion = remoteShardVersion;
}
} else {
- // Epoch changed midway while applying the batch so set the version to something unique
+ // Epoch changed midway while applying the batch so set the version to something
+ // unique
// and non-existent to force a reload when refreshIsNeeded is called.
previouslyNotedVersion = ChunkVersion::IGNORED();
}
@@ -717,7 +724,8 @@ Status ChunkManagerTargeter::refreshIfNeeded(OperationContext* opCtx, bool* wasC
// Reset the field
_needsTargetingRefresh = false;
- // If we couldn't target, we might need to refresh if we haven't remotely refreshed the
+ // If we couldn't target, we might need to refresh if we haven't remotely refreshed
+ // the
// metadata since we last got it from the cache.
bool alreadyRefreshed = wasMetadataRefreshed(lastManager, lastPrimary, _manager, _primary);
diff --git a/src/mongo/s/commands/chunk_manager_targeter.h b/src/mongo/s/commands/chunk_manager_targeter.h
index 049cdb8d858..36fe46a3fe5 100644
--- a/src/mongo/s/commands/chunk_manager_targeter.h
+++ b/src/mongo/s/commands/chunk_manager_targeter.h
@@ -29,6 +29,7 @@
#pragma once
#include <map>
+#include <memory>
#include "mongo/bson/bsonobj.h"
#include "mongo/bson/bsonobj_comparator_interface.h"
@@ -81,16 +82,16 @@ public:
// Returns ShardKeyNotFound if the update can't be targeted without a shard key.
Status targetUpdate(OperationContext* opCtx,
const BatchedUpdateDocument& updateDoc,
- std::vector<ShardEndpoint*>* endpoints) const;
+ std::vector<std::unique_ptr<ShardEndpoint>>* endpoints) const override;
// Returns ShardKeyNotFound if the delete can't be targeted without a shard key.
Status targetDelete(OperationContext* opCtx,
const BatchedDeleteDocument& deleteDoc,
- std::vector<ShardEndpoint*>* endpoints) const;
+ std::vector<std::unique_ptr<ShardEndpoint>>* endpoints) const override;
- Status targetCollection(std::vector<ShardEndpoint*>* endpoints) const;
+ Status targetCollection(std::vector<std::unique_ptr<ShardEndpoint>>* endpoints) const override;
- Status targetAllShards(std::vector<ShardEndpoint*>* endpoints) const;
+ Status targetAllShards(std::vector<std::unique_ptr<ShardEndpoint>>* endpoints) const override;
void noteStaleResponse(const ShardEndpoint& endpoint, const BSONObj& staleInfo);
@@ -134,7 +135,7 @@ private:
Status targetDoc(OperationContext* opCtx,
const BSONObj& doc,
const BSONObj& collation,
- std::vector<ShardEndpoint*>* endpoints) const;
+ std::vector<std::unique_ptr<ShardEndpoint>>* endpoints) const;
/**
* Returns a vector of ShardEndpoints for a potentially multi-shard query.
@@ -146,7 +147,7 @@ private:
Status targetQuery(OperationContext* opCtx,
const BSONObj& query,
const BSONObj& collation,
- std::vector<ShardEndpoint*>* endpoints) const;
+ std::vector<std::unique_ptr<ShardEndpoint>>* endpoints) const;
/**
* Returns a ShardEndpoint for an exact shard key query.
diff --git a/src/mongo/s/commands/cluster_write_cmd.cpp b/src/mongo/s/commands/cluster_write_cmd.cpp
index d8a05b779b6..a5d2092a96c 100644
--- a/src/mongo/s/commands/cluster_write_cmd.cpp
+++ b/src/mongo/s/commands/cluster_write_cmd.cpp
@@ -234,8 +234,7 @@ private:
if (!status.isOK())
return status;
- OwnedPointerVector<ShardEndpoint> endpointsOwned;
- vector<ShardEndpoint*>& endpoints = endpointsOwned.mutableVector();
+ vector<std::unique_ptr<ShardEndpoint>> endpoints;
if (targetingBatchItem.getOpType() == BatchedCommandRequest::BatchType_Insert) {
ShardEndpoint* endpoint;
@@ -243,7 +242,7 @@ private:
targeter.targetInsert(opCtx, targetingBatchItem.getDocument(), &endpoint);
if (!status.isOK())
return status;
- endpoints.push_back(endpoint);
+ endpoints.push_back(std::unique_ptr<ShardEndpoint>{endpoint});
} else if (targetingBatchItem.getOpType() == BatchedCommandRequest::BatchType_Update) {
Status status =
targeter.targetUpdate(opCtx, *targetingBatchItem.getUpdate(), &endpoints);
@@ -261,9 +260,8 @@ private:
// Assemble requests
std::vector<AsyncRequestsSender::Request> requests;
- for (vector<ShardEndpoint*>::const_iterator it = endpoints.begin(); it != endpoints.end();
- ++it) {
- const ShardEndpoint* endpoint = *it;
+ for (auto it = endpoints.begin(); it != endpoints.end(); ++it) {
+ const ShardEndpoint* endpoint = it->get();
auto shardStatus = shardRegistry->getShard(opCtx, endpoint->shardName);
if (!shardStatus.isOK()) {
diff --git a/src/mongo/s/commands/strategy.cpp b/src/mongo/s/commands/strategy.cpp
index 1a6b2d63462..0182a091ab7 100644
--- a/src/mongo/s/commands/strategy.cpp
+++ b/src/mongo/s/commands/strategy.cpp
@@ -520,8 +520,7 @@ void Strategy::killCursors(OperationContext* opCtx, DbMessage* dbm) {
}
void Strategy::writeOp(OperationContext* opCtx, DbMessage* dbm) {
- OwnedPointerVector<BatchedCommandRequest> commandRequestsOwned;
- std::vector<BatchedCommandRequest*>& commandRequests = commandRequestsOwned.mutableVector();
+ std::vector<std::unique_ptr<BatchedCommandRequest>> commandRequests;
msgToBatchRequests(dbm->msg(), &commandRequests);
@@ -533,7 +532,7 @@ void Strategy::writeOp(OperationContext* opCtx, DbMessage* dbm) {
clientLastError.startRequest();
}
- BatchedCommandRequest* const commandRequest = *it;
+ BatchedCommandRequest* const commandRequest = it->get();
BatchedCommandResponse commandResponse;
diff --git a/src/mongo/s/ns_targeter.h b/src/mongo/s/ns_targeter.h
index 95e19a81a53..194514a44f3 100644
--- a/src/mongo/s/ns_targeter.h
+++ b/src/mongo/s/ns_targeter.h
@@ -28,7 +28,9 @@
#pragma once
+#include <memory>
#include <string>
+#include <vector>
#include "mongo/base/status.h"
#include "mongo/bson/bsonobj.h"
@@ -94,7 +96,7 @@ public:
*/
virtual Status targetUpdate(OperationContext* opCtx,
const BatchedUpdateDocument& updateDoc,
- std::vector<ShardEndpoint*>* endpoints) const = 0;
+ std::vector<std::unique_ptr<ShardEndpoint>>* endpoints) const = 0;
/**
* Returns a vector of ShardEndpoints for a potentially multi-shard delete.
@@ -103,21 +105,23 @@ public:
*/
virtual Status targetDelete(OperationContext* opCtx,
const BatchedDeleteDocument& deleteDoc,
- std::vector<ShardEndpoint*>* endpoints) const = 0;
+ std::vector<std::unique_ptr<ShardEndpoint>>* endpoints) const = 0;
/**
* Returns a vector of ShardEndpoints for the entire collection.
*
* Returns !OK with message if the full collection could not be targeted.
*/
- virtual Status targetCollection(std::vector<ShardEndpoint*>* endpoints) const = 0;
+ virtual Status targetCollection(
+ std::vector<std::unique_ptr<ShardEndpoint>>* endpoints) const = 0;
/**
* Returns a vector of ShardEndpoints for all shards.
*
* Returns !OK with message if all shards could not be targeted.
*/
- virtual Status targetAllShards(std::vector<ShardEndpoint*>* endpoints) const = 0;
+ virtual Status targetAllShards(
+ std::vector<std::unique_ptr<ShardEndpoint>>* endpoints) const = 0;
/**
* Informs the targeter that a targeting failure occurred during one of the last targeting
diff --git a/src/mongo/s/shard_key_pattern.cpp b/src/mongo/s/shard_key_pattern.cpp
index ef0009bd8d6..bce1698ecb3 100644
--- a/src/mongo/s/shard_key_pattern.cpp
+++ b/src/mongo/s/shard_key_pattern.cpp
@@ -40,6 +40,7 @@
#include "mongo/db/ops/path_support.h"
#include "mongo/db/query/canonical_query.h"
#include "mongo/util/mongoutils/str.h"
+#include "mongo/util/transitional_tools_do_not_use/vector_spooling.h"
namespace mongo {
@@ -74,44 +75,43 @@ static bool isHashedPatternEl(const BSONElement& el) {
}
/**
- * Currently the allowable shard keys are either
+ * Currently the allowable shard keys are either:
* i) a hashed single field, e.g. { a : "hashed" }, or
* ii) a compound list of ascending, potentially-nested field paths, e.g. { a : 1 , b.c : 1 }
*/
-static vector<FieldRef*> parseShardKeyPattern(const BSONObj& keyPattern) {
- OwnedPointerVector<FieldRef> parsedPaths;
- static const vector<FieldRef*> empty;
+static std::vector<std::unique_ptr<FieldRef>> parseShardKeyPattern(const BSONObj& keyPattern) {
+ std::vector<std::unique_ptr<FieldRef>> parsedPaths;
BSONObjIterator patternIt(keyPattern);
while (patternIt.more()) {
BSONElement patternEl = patternIt.next();
- parsedPaths.push_back(new FieldRef(patternEl.fieldNameStringData()));
+ parsedPaths.push_back(stdx::make_unique<FieldRef>(patternEl.fieldNameStringData()));
const FieldRef& patternPath = *parsedPaths.back();
// Empty path
if (patternPath.numParts() == 0)
- return empty;
+ return {};
// Extra "." in path?
if (patternPath.dottedField() != patternEl.fieldNameStringData())
- return empty;
+ return {};
// Empty parts of the path, ".."?
for (size_t i = 0; i < patternPath.numParts(); ++i) {
if (patternPath.getPart(i).size() == 0)
- return empty;
+ return {};
}
// Numeric and ascending (1.0), or "hashed" and single field
if (!patternEl.isNumber()) {
if (keyPattern.nFields() != 1 || !isHashedPatternEl(patternEl))
- return empty;
+ return {};
} else if (patternEl.numberInt() != 1) {
- return empty;
+ return {};
}
}
- return parsedPaths.release();
+ return parsedPaths;
}
ShardKeyPattern::ShardKeyPattern(const BSONObj& keyPattern)
@@ -290,7 +290,7 @@ BSONObj ShardKeyPattern::extractShardKeyFromQuery(const CanonicalQuery& query) c
// Extract equalities from query.
EqualityMatches equalities;
// TODO: Build the path set initially?
- FieldRefSet keyPatternPathSet(_keyPatternPaths.vector());
+ FieldRefSet keyPatternPathSet(transitional_tools_do_not_use::unspool_vector(_keyPatternPaths));
// We only care about extracting the full key pattern paths - if they don't exist (or are
// conflicting), we don't contain the shard key.
Status eqStatus =
@@ -306,9 +306,7 @@ BSONObj ShardKeyPattern::extractShardKeyFromQuery(const CanonicalQuery& query) c
BSONObjBuilder keyBuilder;
// Iterate the parsed paths to avoid re-parsing
- for (OwnedPointerVector<FieldRef>::const_iterator it = _keyPatternPaths.begin();
- it != _keyPatternPaths.end();
- ++it) {
+ for (auto it = _keyPatternPaths.begin(); it != _keyPatternPaths.end(); ++it) {
const FieldRef& patternPath = **it;
BSONElement equalEl = findEqualityElement(equalities, patternPath);
diff --git a/src/mongo/s/shard_key_pattern.h b/src/mongo/s/shard_key_pattern.h
index ca4e01a4a5a..f7c1b8d387b 100644
--- a/src/mongo/s/shard_key_pattern.h
+++ b/src/mongo/s/shard_key_pattern.h
@@ -28,6 +28,9 @@
#pragma once
+#include <memory>
+#include <vector>
+
#include "mongo/base/owned_pointer_vector.h"
#include "mongo/base/status.h"
#include "mongo/base/status_with.h"
@@ -223,7 +226,7 @@ public:
private:
// Ordered, parsed paths
- OwnedPointerVector<FieldRef> _keyPatternPaths;
+ std::vector<std::unique_ptr<FieldRef>> _keyPatternPaths;
KeyPattern _keyPattern;
};
diff --git a/src/mongo/s/write_ops/batch_upconvert.cpp b/src/mongo/s/write_ops/batch_upconvert.cpp
index a456a8d22c4..d9db6fa9966 100644
--- a/src/mongo/s/write_ops/batch_upconvert.cpp
+++ b/src/mongo/s/write_ops/batch_upconvert.cpp
@@ -30,6 +30,9 @@
#include "mongo/s/write_ops/batch_upconvert.h"
+#include <memory>
+#include <vector>
+
#include "mongo/bson/bsonobj.h"
#include "mongo/client/dbclientinterface.h"
#include "mongo/db/dbmessage.h"
@@ -40,6 +43,7 @@
#include "mongo/s/write_ops/batched_command_response.h"
#include "mongo/s/write_ops/batched_delete_document.h"
#include "mongo/s/write_ops/batched_update_document.h"
+#include "mongo/stdx/memory.h"
namespace mongo {
@@ -51,7 +55,8 @@ using std::vector;
namespace {
// Batch inserts may get mapped to multiple batch requests, to avoid spilling MaxBSONObjSize
-void msgToBatchInserts(const Message& insertMsg, vector<BatchedCommandRequest*>* insertRequests) {
+void msgToBatchInserts(const Message& insertMsg,
+ std::vector<std::unique_ptr<BatchedCommandRequest>>* insertRequests) {
// Parsing DbMessage throws
DbMessage dbMsg(insertMsg);
NamespaceString nss(dbMsg.getns());
@@ -80,8 +85,8 @@ void msgToBatchInserts(const Message& insertMsg, vector<BatchedCommandRequest*>*
dassert(!docs.empty());
// No exceptions from here on
- BatchedCommandRequest* request =
- new BatchedCommandRequest(BatchedCommandRequest::BatchType_Insert);
+ auto request =
+ stdx::make_unique<BatchedCommandRequest>(BatchedCommandRequest::BatchType_Insert);
request->setNS(nss);
for (vector<BSONObj>::const_iterator it = docs.begin(); it != docs.end(); ++it) {
request->getInsertRequest()->addToDocuments(*it);
@@ -89,11 +94,11 @@ void msgToBatchInserts(const Message& insertMsg, vector<BatchedCommandRequest*>*
request->setOrdered(ordered);
request->setWriteConcern(WriteConcernOptions::Acknowledged);
- insertRequests->push_back(request);
+ insertRequests->push_back(std::move(request));
}
}
-BatchedCommandRequest* msgToBatchUpdate(const Message& updateMsg) {
+std::unique_ptr<BatchedCommandRequest> msgToBatchUpdate(const Message& updateMsg) {
// Parsing DbMessage throws
DbMessage dbMsg(updateMsg);
NamespaceString nss(dbMsg.getns());
@@ -110,8 +115,8 @@ BatchedCommandRequest* msgToBatchUpdate(const Message& updateMsg) {
updateDoc->setUpsert(upsert);
updateDoc->setMulti(multi);
- BatchedCommandRequest* request =
- new BatchedCommandRequest(BatchedCommandRequest::BatchType_Update);
+ auto request =
+ stdx::make_unique<BatchedCommandRequest>(BatchedCommandRequest::BatchType_Update);
request->setNS(nss);
request->getUpdateRequest()->addToUpdates(updateDoc);
request->setWriteConcern(WriteConcernOptions::Acknowledged);
@@ -119,7 +124,7 @@ BatchedCommandRequest* msgToBatchUpdate(const Message& updateMsg) {
return request;
}
-BatchedCommandRequest* msgToBatchDelete(const Message& deleteMsg) {
+std::unique_ptr<BatchedCommandRequest> msgToBatchDelete(const Message& deleteMsg) {
// Parsing DbMessage throws
DbMessage dbMsg(deleteMsg);
NamespaceString nss(dbMsg.getns());
@@ -132,8 +137,8 @@ BatchedCommandRequest* msgToBatchDelete(const Message& deleteMsg) {
deleteDoc->setLimit(limit);
deleteDoc->setQuery(query);
- BatchedCommandRequest* request =
- new BatchedCommandRequest(BatchedCommandRequest::BatchType_Delete);
+ auto request =
+ stdx::make_unique<BatchedCommandRequest>(BatchedCommandRequest::BatchType_Delete);
request->setNS(nss);
request->getDeleteRequest()->addToDeletes(deleteDoc);
request->setWriteConcern(WriteConcernOptions::Acknowledged);
@@ -148,7 +153,8 @@ void buildErrorFromResponse(const BatchedCommandResponse& response, WriteErrorDe
} // namespace
-void msgToBatchRequests(const Message& msg, vector<BatchedCommandRequest*>* requests) {
+void msgToBatchRequests(const Message& msg,
+ std::vector<std::unique_ptr<BatchedCommandRequest>>* requests) {
int opType = msg.operation();
if (opType == dbInsert) {
diff --git a/src/mongo/s/write_ops/batch_upconvert.h b/src/mongo/s/write_ops/batch_upconvert.h
index 183880eed64..3c38d2fb078 100644
--- a/src/mongo/s/write_ops/batch_upconvert.h
+++ b/src/mongo/s/write_ops/batch_upconvert.h
@@ -28,6 +28,7 @@
#pragma once
+#include <memory>
#include <vector>
namespace mongo {
@@ -42,7 +43,8 @@ class Message;
// NOTE: These functions throw on invalid message format.
//
-void msgToBatchRequests(const Message& msg, std::vector<BatchedCommandRequest*>* requests);
+void msgToBatchRequests(const Message& msg,
+ std::vector<std::unique_ptr<BatchedCommandRequest>>* requests);
/**
* Utility function for recording completed batch writes into the LastError object.
diff --git a/src/mongo/s/write_ops/batch_upconvert_test.cpp b/src/mongo/s/write_ops/batch_upconvert_test.cpp
index e74fd92b8a1..60ca35cf4d2 100644
--- a/src/mongo/s/write_ops/batch_upconvert_test.cpp
+++ b/src/mongo/s/write_ops/batch_upconvert_test.cpp
@@ -62,11 +62,10 @@ TEST(WriteBatchUpconvert, BasicInsert) {
doc.appendSelfToBufBuilder(insertMsgB);
insertMsg.setData(dbInsert, insertMsgB.buf(), insertMsgB.len());
- OwnedPointerVector<BatchedCommandRequest> requestsOwned;
- vector<BatchedCommandRequest*>& requests = requestsOwned.mutableVector();
+ std::vector<std::unique_ptr<BatchedCommandRequest>> requests;
msgToBatchRequests(insertMsg, &requests);
- BatchedCommandRequest* request = requests.back();
+ BatchedCommandRequest* request = requests.back().get();
ASSERT_EQUALS(request->getBatchType(), BatchedCommandRequest::BatchType_Insert);
string errMsg;
ASSERT(request->isValid(&errMsg));
@@ -98,11 +97,10 @@ TEST(WriteBatchUpconvert, BasicUpdate) {
update.appendSelfToBufBuilder(updateMsgB);
updateMsg.setData(dbUpdate, updateMsgB.buf(), updateMsgB.len());
- OwnedPointerVector<BatchedCommandRequest> requestsOwned;
- vector<BatchedCommandRequest*>& requests = requestsOwned.mutableVector();
+ std::vector<std::unique_ptr<BatchedCommandRequest>> requests;
msgToBatchRequests(updateMsg, &requests);
- BatchedCommandRequest* request = requests.back();
+ BatchedCommandRequest* request = requests.back().get();
ASSERT_EQUALS(request->getBatchType(), BatchedCommandRequest::BatchType_Update);
string errMsg;
ASSERT(request->isValid(&errMsg));
@@ -132,11 +130,10 @@ TEST(WriteBatchUpconvert, BasicDelete) {
query.appendSelfToBufBuilder(deleteMsgB);
deleteMsg.setData(dbDelete, deleteMsgB.buf(), deleteMsgB.len());
- OwnedPointerVector<BatchedCommandRequest> requestsOwned;
- vector<BatchedCommandRequest*>& requests = requestsOwned.mutableVector();
+ std::vector<std::unique_ptr<BatchedCommandRequest>> requests;
msgToBatchRequests(deleteMsg, &requests);
- BatchedCommandRequest* request = requests.back();
+ BatchedCommandRequest* request = requests.back().get();
ASSERT_EQUALS(request->getBatchType(), BatchedCommandRequest::BatchType_Delete);
string errMsg;
ASSERT(request->isValid(&errMsg));
diff --git a/src/mongo/s/write_ops/batch_write_op.cpp b/src/mongo/s/write_ops/batch_write_op.cpp
index 6f0aad917b4..c1e411cb2ed 100644
--- a/src/mongo/s/write_ops/batch_write_op.cpp
+++ b/src/mongo/s/write_ops/batch_write_op.cpp
@@ -31,6 +31,8 @@
#include "mongo/s/write_ops/batch_write_op.h"
#include "mongo/base/error_codes.h"
+#include "mongo/stdx/memory.h"
+#include "mongo/util/transitional_tools_do_not_use/vector_spooling.h"
namespace mongo {
@@ -548,9 +550,9 @@ void BatchWriteOp::noteBatchResponse(const TargetedWriteBatch& targetedBatch,
// Special handling for write concern errors, save for later
if (response.isWriteConcernErrorSet()) {
- unique_ptr<ShardWCError> wcError(
- new ShardWCError(targetedBatch.getEndpoint(), *response.getWriteConcernError()));
- _wcErrors.mutableVector().push_back(wcError.release());
+ auto wcError = stdx::make_unique<ShardWCError>(targetedBatch.getEndpoint(),
+ *response.getWriteConcernError());
+ _wcErrors.push_back(std::move(wcError));
}
vector<WriteErrorDetail*> itemErrors;
@@ -628,10 +630,10 @@ void BatchWriteOp::noteBatchResponse(const TargetedWriteBatch& targetedBatch,
int batchIndex = targetedBatch.getWrites()[childBatchIndex]->writeOpRef.first;
// Push the upserted id with the correct index into the batch upserted ids
- BatchedUpsertDetail* upsertedId = new BatchedUpsertDetail;
+ auto upsertedId = stdx::make_unique<BatchedUpsertDetail>();
upsertedId->setIndex(batchIndex);
upsertedId->setUpsertedID(childUpsertedId->getUpsertedID());
- _upsertedIds.mutableVector().push_back(upsertedId);
+ _upsertedIds.push_back(std::move(upsertedId));
}
}
}
@@ -761,9 +763,8 @@ void BatchWriteOp::buildClientResponse(BatchedCommandResponse* batchResp) {
error->setErrCode((*_wcErrors.begin())->error.getErrCode());
}
- for (vector<ShardWCError*>::const_iterator it = _wcErrors.begin(); it != _wcErrors.end();
- ++it) {
- const ShardWCError* wcError = *it;
+ for (auto it = _wcErrors.begin(); it != _wcErrors.end(); ++it) {
+ const ShardWCError* wcError = it->get();
if (it != _wcErrors.begin())
msg << " :: and :: ";
msg << wcError->error.getErrMessage() << " at " << wcError->endpoint.shardName;
@@ -778,7 +779,7 @@ void BatchWriteOp::buildClientResponse(BatchedCommandResponse* batchResp) {
//
if (_upsertedIds.size() != 0) {
- batchResp->setUpsertDetails(_upsertedIds.vector());
+ batchResp->setUpsertDetails(transitional_tools_do_not_use::unspool_vector(_upsertedIds));
}
// Stats
diff --git a/src/mongo/s/write_ops/batch_write_op.h b/src/mongo/s/write_ops/batch_write_op.h
index 455b84e1cd3..3bfbdf39954 100644
--- a/src/mongo/s/write_ops/batch_write_op.h
+++ b/src/mongo/s/write_ops/batch_write_op.h
@@ -28,6 +28,7 @@
#pragma once
+#include <memory>
#include <set>
#include <vector>
@@ -173,10 +174,10 @@ private:
std::set<const TargetedWriteBatch*> _targeted;
// Write concern responses from all write batches so far
- OwnedPointerVector<ShardWCError> _wcErrors;
+ std::vector<std::unique_ptr<ShardWCError>> _wcErrors;
// Upserted ids for the whole write batch
- OwnedPointerVector<BatchedUpsertDetail> _upsertedIds;
+ std::vector<std::unique_ptr<BatchedUpsertDetail>> _upsertedIds;
// Stats for the entire batch op
std::unique_ptr<BatchWriteStats> _stats;
diff --git a/src/mongo/s/write_ops/mock_ns_targeter.h b/src/mongo/s/write_ops/mock_ns_targeter.h
index a430e3caa7d..5d73e817633 100644
--- a/src/mongo/s/write_ops/mock_ns_targeter.h
+++ b/src/mongo/s/write_ops/mock_ns_targeter.h
@@ -33,6 +33,7 @@
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/db/range_arithmetic.h"
#include "mongo/s/ns_targeter.h"
+#include "mongo/stdx/memory.h"
#include "mongo/unittest/unittest.h"
namespace mongo {
@@ -87,13 +88,13 @@ public:
*/
Status targetInsert(OperationContext* opCtx,
const BSONObj& doc,
- ShardEndpoint** endpoint) const {
- std::vector<ShardEndpoint*> endpoints;
+ ShardEndpoint** endpoint) const override {
+ std::vector<std::unique_ptr<ShardEndpoint>> endpoints;
Status status = targetQuery(doc, &endpoints);
if (!status.isOK())
return status;
if (!endpoints.empty())
- *endpoint = endpoints.front();
+ *endpoint = endpoints.front().release();
return Status::OK();
}
@@ -103,7 +104,7 @@ public:
*/
Status targetUpdate(OperationContext* opCtx,
const BatchedUpdateDocument& updateDoc,
- std::vector<ShardEndpoint*>* endpoints) const {
+ std::vector<std::unique_ptr<ShardEndpoint>>* endpoints) const override {
return targetQuery(updateDoc.getQuery(), endpoints);
}
@@ -113,22 +114,22 @@ public:
*/
/**
 * Returns the mock endpoints for the query from the mock ranges. A delete is targeted by
 * whichever shards its query range covers.
 */
Status targetDelete(OperationContext* opCtx,
                    const BatchedDeleteDocument& deleteDoc,
                    std::vector<std::unique_ptr<ShardEndpoint>>* endpoints) const override {
    // `override` added for consistency with the sibling virtuals (targetUpdate,
    // targetCollection, targetAllShards) so the compiler verifies the signature
    // against NSTargeter's pure-virtual declaration.
    return targetQuery(deleteDoc.getQuery(), endpoints);
}
- Status targetCollection(std::vector<ShardEndpoint*>* endpoints) const {
+ Status targetCollection(std::vector<std::unique_ptr<ShardEndpoint>>* endpoints) const override {
// TODO: XXX
// No-op
return Status::OK();
}
- Status targetAllShards(std::vector<ShardEndpoint*>* endpoints) const {
+ Status targetAllShards(std::vector<std::unique_ptr<ShardEndpoint>>* endpoints) const override {
const std::vector<MockRange*>& ranges = getRanges();
for (std::vector<MockRange*>::const_iterator it = ranges.begin(); it != ranges.end();
++it) {
const MockRange* range = *it;
- endpoints->push_back(new ShardEndpoint(range->endpoint));
+ endpoints->push_back(stdx::make_unique<ShardEndpoint>(range->endpoint));
}
return Status::OK();
@@ -184,7 +185,8 @@ private:
* Returns the first ShardEndpoint for the query from the mock ranges. Only can handle
* queries of the form { field : { $gte : <value>, $lt : <value> } }.
*/
- Status targetQuery(const BSONObj& query, std::vector<ShardEndpoint*>* endpoints) const {
+ Status targetQuery(const BSONObj& query,
+ std::vector<std::unique_ptr<ShardEndpoint>>* endpoints) const {
KeyRange queryRange = parseRange(query);
const std::vector<MockRange*>& ranges = getRanges();
@@ -196,7 +198,7 @@ private:
queryRange.maxKey,
range->range.minKey,
range->range.maxKey)) {
- endpoints->push_back(new ShardEndpoint(range->endpoint));
+ endpoints->push_back(stdx::make_unique<ShardEndpoint>(range->endpoint));
}
}
diff --git a/src/mongo/s/write_ops/write_op.cpp b/src/mongo/s/write_ops/write_op.cpp
index 6f58a58a124..c0eac12e7bb 100644
--- a/src/mongo/s/write_ops/write_op.cpp
+++ b/src/mongo/s/write_ops/write_op.cpp
@@ -73,8 +73,7 @@ Status WriteOp::targetWrites(OperationContext* opCtx,
bool isIndexInsert = _itemRef.getRequest()->isInsertIndexRequest();
Status targetStatus = Status::OK();
- OwnedPointerVector<ShardEndpoint> endpointsOwned;
- vector<ShardEndpoint*>& endpoints = endpointsOwned.mutableVector();
+ std::vector<std::unique_ptr<ShardEndpoint>> endpoints;
if (isUpdate) {
targetStatus = targeter.targetUpdate(opCtx, *_itemRef.getUpdate(), &endpoints);
@@ -99,7 +98,7 @@ Status WriteOp::targetWrites(OperationContext* opCtx,
// Store single endpoint result if we targeted a single endpoint
if (endpoint)
- endpoints.push_back(endpoint);
+ endpoints.push_back(std::unique_ptr<ShardEndpoint>{endpoint});
}
// If we're targeting more than one endpoint with an update/delete, we have to target
@@ -107,7 +106,7 @@ Status WriteOp::targetWrites(OperationContext* opCtx,
// NOTE: Index inserts are currently specially targeted only at the current collection to
// avoid creating collections everywhere.
if (targetStatus.isOK() && endpoints.size() > 1u && !isIndexInsert) {
- endpointsOwned.clear();
+ endpoints.clear();
invariant(endpoints.empty());
targetStatus = targeter.targetAllShards(&endpoints);
}
@@ -116,8 +115,8 @@ Status WriteOp::targetWrites(OperationContext* opCtx,
if (!targetStatus.isOK())
return targetStatus;
- for (vector<ShardEndpoint*>::iterator it = endpoints.begin(); it != endpoints.end(); ++it) {
- ShardEndpoint* endpoint = *it;
+ for (auto it = endpoints.begin(); it != endpoints.end(); ++it) {
+ ShardEndpoint* endpoint = it->get();
_childOps.push_back(new ChildWriteOp(this));
diff --git a/src/mongo/util/net/listen.cpp b/src/mongo/util/net/listen.cpp
index 0701bfa9f93..6a3d0446faa 100644
--- a/src/mongo/util/net/listen.cpp
+++ b/src/mongo/util/net/listen.cpp
@@ -34,6 +34,9 @@
#include "mongo/util/net/listen.h"
+#include <memory>
+#include <vector>
+
#include "mongo/base/owned_pointer_vector.h"
#include "mongo/base/status.h"
#include "mongo/config.h"
@@ -444,15 +447,15 @@ void Listener::initAndListen() {
_readyCondition.notify_all();
}
- OwnedPointerVector<EventHolder> eventHolders;
+ std::vector<std::unique_ptr<EventHolder>> eventHolders;
std::unique_ptr<WSAEVENT[]> events(new WSAEVENT[_socks.size()]);
// Populate events array with an event for each socket we are watching
for (size_t count = 0; count < _socks.size(); ++count) {
- EventHolder* ev(new EventHolder);
- eventHolders.mutableVector().push_back(ev);
- events[count] = ev->get();
+ auto ev = stdx::make_unique<EventHolder>();
+ eventHolders.push_back(std::move(ev));
+ events[count] = eventHolders.back()->get();
}
// The check against _finished allows us to actually stop the listener by signalling it through
diff --git a/src/mongo/util/transitional_tools_do_not_use/vector_spooling.h b/src/mongo/util/transitional_tools_do_not_use/vector_spooling.h
new file mode 100644
index 00000000000..6cb44513dfd
--- /dev/null
+++ b/src/mongo/util/transitional_tools_do_not_use/vector_spooling.h
@@ -0,0 +1,66 @@
+/**
+* Copyright (C) 2017 MongoDB Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*
+* As a special exception, the copyright holders give permission to link the
+* code of portions of this program with the OpenSSL library under certain
+* conditions as described in each individual source file and distribute
+* linked combinations including the program with the OpenSSL library. You
+* must comply with the GNU Affero General Public License in all respects for
+* all of the code used other than as permitted herein. If you modify file(s)
+* with this exception, you may extend this exception to your version of the
+* file(s), but you are not obligated to do so. If you do not wish to do so,
+* delete this exception statement from your version. If you delete this
+* exception statement from all source files in the program, then also delete
+* it in the license file.
+*/
+
+#pragma once
+
+#include <algorithm>
+#include <iterator>
+#include <memory>
+#include <vector>
+
+namespace mongo {
+namespace transitional_tools_do_not_use {
template <typename T>
inline std::vector<T*> unspool_vector(const std::vector<std::unique_ptr<T>>& v) {
    // Build a parallel vector of non-owning raw pointers. Ownership stays with `v`;
    // the result must not outlive it.
    std::vector<T*> raw;
    raw.reserve(v.size());
    for (const auto& owned : v) {
        raw.push_back(owned.get());
    }
    return raw;
}
+
template <typename T>
inline std::vector<std::unique_ptr<T>> spool_vector(const std::vector<T*>& v) noexcept {
    // Adopt ownership of every raw pointer in `v`; after this call the caller must
    // not delete the originals.
    // NOTE(review): reserve/emplace_back may allocate, so under `noexcept` a
    // std::bad_alloc would terminate rather than propagate — presumably acceptable
    // for this transitional helper; confirm with project allocation policy.
    std::vector<std::unique_ptr<T>> owned;
    owned.reserve(v.size());
    for (T* raw : v) {
        owned.emplace_back(raw);
    }
    return owned;
}
+
template <typename T>
inline std::vector<T*> leak_vector(std::vector<std::unique_ptr<T>>& v) noexcept {
    // Release ownership of every element of `v` (its pointers become null) and hand
    // the raw pointers back to the caller, who is now responsible for deleting them.
    std::vector<T*> released;
    released.reserve(v.size());
    for (auto& owned : v) {
        released.push_back(owned.release());
    }
    return released;
}
+} // namespace transitional_tools_do_not_use
+} // namespace mongo