Diffstat (limited to 'src/mongo/db/commands')
-rw-r--r--  src/mongo/db/commands/clone_collection.cpp                 4
-rw-r--r--  src/mongo/db/commands/dbhash.cpp                           4
-rw-r--r--  src/mongo/db/commands/distinct.cpp                         4
-rw-r--r--  src/mongo/db/commands/index_filter_commands_test.cpp       6
-rw-r--r--  src/mongo/db/commands/list_collections.cpp                 6
-rw-r--r--  src/mongo/db/commands/list_indexes.cpp                     6
-rw-r--r--  src/mongo/db/commands/mr.cpp                              14
-rw-r--r--  src/mongo/db/commands/parallel_collection_scan.cpp         4
-rw-r--r--  src/mongo/db/commands/pipeline_command.cpp                 8
-rw-r--r--  src/mongo/db/commands/plan_cache_commands_test.cpp        20
-rw-r--r--  src/mongo/db/commands/repair_cursor.cpp                    6
-rw-r--r--  src/mongo/db/commands/write_commands/batch_executor.cpp    6
12 files changed, 44 insertions, 44 deletions
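
Every hunk below makes the same mechanical substitution: std::auto_ptr (deprecated since C++11) is replaced by std::unique_ptr. The following minimal sketch is illustrative only, not part of the diff; PlanExecutor here is a stub standing in for the owned types above. It shows why the swap is drop-in: unique_ptr keeps the reset()/get()/release() surface these call sites rely on, but unlike auto_ptr it cannot be copied, so any ownership transfer has to be written explicitly with std::move.

// Minimal sketch (not from the diff): why s/auto_ptr/unique_ptr/ is drop-in.
// PlanExecutor is a stub standing in for the owned types in the hunks below.
#include <memory>
#include <utility>

struct PlanExecutor {
    bool getNext() { return false; }  // stub for illustration
};

int main() {
    // Same reset()/get()/release() surface that the call sites below use.
    std::unique_ptr<PlanExecutor> exec;
    exec.reset(new PlanExecutor());       // take ownership, as in dbhash.cpp
    PlanExecutor* observed = exec.get();  // non-owning pointer for helpers
    (void)observed;

    // auto_ptr's copy constructor silently moved ownership; unique_ptr refuses
    // to copy, so the transfer must be spelled out with std::move.
    std::unique_ptr<PlanExecutor> other = std::move(exec);  // exec is now null

    // release() hands the raw pointer to a container that takes ownership,
    // as in why->stats.mutableVector().push_back(stats.release()) below.
    delete other.release();
    return 0;
}
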
diff --git a/src/mongo/db/commands/clone_collection.cpp b/src/mongo/db/commands/clone_collection.cpp
index a532a1e402f..2ef62f8b090 100644
--- a/src/mongo/db/commands/clone_collection.cpp
+++ b/src/mongo/db/commands/clone_collection.cpp
@@ -57,7 +57,7 @@
namespace mongo {
- using std::auto_ptr;
+ using std::unique_ptr;
using std::string;
using std::stringstream;
using std::endl;
@@ -146,7 +146,7 @@ namespace mongo {
<< " query: " << query << " " << ( copyIndexes ? "" : ", not copying indexes" ) << endl;
Cloner cloner;
- auto_ptr<DBClientConnection> myconn;
+ unique_ptr<DBClientConnection> myconn;
myconn.reset( new DBClientConnection() );
if ( ! myconn->connect( HostAndPort(fromhost) , errmsg ) )
return false;
diff --git a/src/mongo/db/commands/dbhash.cpp b/src/mongo/db/commands/dbhash.cpp
index b99d8305a40..e96fb4b6288 100644
--- a/src/mongo/db/commands/dbhash.cpp
+++ b/src/mongo/db/commands/dbhash.cpp
@@ -48,7 +48,7 @@
namespace mongo {
using boost::scoped_ptr;
- using std::auto_ptr;
+ using std::unique_ptr;
using std::list;
using std::endl;
using std::set;
@@ -97,7 +97,7 @@ namespace mongo {
IndexDescriptor* desc = collection->getIndexCatalog()->findIdIndex( opCtx );
- auto_ptr<PlanExecutor> exec;
+ unique_ptr<PlanExecutor> exec;
if ( desc ) {
exec.reset(InternalPlanner::indexScan(opCtx,
collection,
diff --git a/src/mongo/db/commands/distinct.cpp b/src/mongo/db/commands/distinct.cpp
index 778bfdd723d..ceae947fc52 100644
--- a/src/mongo/db/commands/distinct.cpp
+++ b/src/mongo/db/commands/distinct.cpp
@@ -47,7 +47,7 @@
namespace mongo {
- using std::auto_ptr;
+ using std::unique_ptr;
using std::string;
using std::stringstream;
@@ -131,7 +131,7 @@ namespace mongo {
return 0;
}
- auto_ptr<PlanExecutor> exec(rawExec);
+ unique_ptr<PlanExecutor> exec(rawExec);
BSONObj obj;
PlanExecutor::ExecState state;
diff --git a/src/mongo/db/commands/index_filter_commands_test.cpp b/src/mongo/db/commands/index_filter_commands_test.cpp
index aaa54f054be..a1781f2b5bd 100644
--- a/src/mongo/db/commands/index_filter_commands_test.cpp
+++ b/src/mongo/db/commands/index_filter_commands_test.cpp
@@ -45,7 +45,7 @@ using namespace mongo;
namespace {
using boost::scoped_ptr;
- using std::auto_ptr;
+ using std::unique_ptr;
using std::string;
using std::vector;
@@ -97,10 +97,10 @@ namespace {
* Utility function to create a PlanRankingDecision
*/
PlanRankingDecision* createDecision(size_t numPlans) {
- auto_ptr<PlanRankingDecision> why(new PlanRankingDecision());
+ unique_ptr<PlanRankingDecision> why(new PlanRankingDecision());
for (size_t i = 0; i < numPlans; ++i) {
CommonStats common("COLLSCAN");
- auto_ptr<PlanStageStats> stats(new PlanStageStats(common, STAGE_COLLSCAN));
+ unique_ptr<PlanStageStats> stats(new PlanStageStats(common, STAGE_COLLSCAN));
stats->specific.reset(new CollectionScanStats());
why->stats.mutableVector().push_back(stats.release());
why->scores.push_back(0U);
diff --git a/src/mongo/db/commands/list_collections.cpp b/src/mongo/db/commands/list_collections.cpp
index 7773b0f0eed..a9fd6daea3b 100644
--- a/src/mongo/db/commands/list_collections.cpp
+++ b/src/mongo/db/commands/list_collections.cpp
@@ -130,8 +130,8 @@ namespace mongo {
names.sort();
}
- std::auto_ptr<WorkingSet> ws(new WorkingSet());
- std::auto_ptr<QueuedDataStage> root(new QueuedDataStage(ws.get()));
+ std::unique_ptr<WorkingSet> ws(new WorkingSet());
+ std::unique_ptr<QueuedDataStage> root(new QueuedDataStage(ws.get()));
for (std::list<std::string>::const_iterator i = names.begin();
i != names.end();
@@ -174,7 +174,7 @@ namespace mongo {
cursorNamespace,
PlanExecutor::YIELD_MANUAL,
&rawExec);
- std::auto_ptr<PlanExecutor> exec(rawExec);
+ std::unique_ptr<PlanExecutor> exec(rawExec);
if (!makeStatus.isOK()) {
return appendCommandStatus( result, makeStatus );
}
diff --git a/src/mongo/db/commands/list_indexes.cpp b/src/mongo/db/commands/list_indexes.cpp
index d84e605c21f..6cdee4785fd 100644
--- a/src/mongo/db/commands/list_indexes.cpp
+++ b/src/mongo/db/commands/list_indexes.cpp
@@ -136,8 +136,8 @@ namespace mongo {
cce->getAllIndexes( txn, &indexNames );
} MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "listIndexes", ns.ns());
- std::auto_ptr<WorkingSet> ws(new WorkingSet());
- std::auto_ptr<QueuedDataStage> root(new QueuedDataStage(ws.get()));
+ std::unique_ptr<WorkingSet> ws(new WorkingSet());
+ std::unique_ptr<QueuedDataStage> root(new QueuedDataStage(ws.get()));
for ( size_t i = 0; i < indexNames.size(); i++ ) {
BSONObj indexSpec;
@@ -166,7 +166,7 @@ namespace mongo {
cursorNamespace,
PlanExecutor::YIELD_MANUAL,
&rawExec);
- std::auto_ptr<PlanExecutor> exec(rawExec);
+ std::unique_ptr<PlanExecutor> exec(rawExec);
if (!makeStatus.isOK()) {
return appendCommandStatus( result, makeStatus );
}
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index 3fbd4460493..099be62e3bc 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -74,7 +74,7 @@ namespace mongo {
using boost::scoped_ptr;
using boost::shared_ptr;
- using std::auto_ptr;
+ using std::unique_ptr;
using std::endl;
using std::set;
using std::string;
@@ -626,7 +626,7 @@ namespace mongo {
"M/R Merge Post Processing Progress",
count);
}
- auto_ptr<DBClientCursor> cursor = _db.query(_config.tempNamespace , BSONObj());
+ unique_ptr<DBClientCursor> cursor = _db.query(_config.tempNamespace , BSONObj());
while (cursor->more()) {
ScopedTransaction scopedXact(_txn, MODE_IX);
Lock::DBLock lock(_txn->lockState(),
@@ -650,7 +650,7 @@ namespace mongo {
"M/R Reduce Post Processing Progress",
count);
}
- auto_ptr<DBClientCursor> cursor = _db.query( _config.tempNamespace , BSONObj() );
+ unique_ptr<DBClientCursor> cursor = _db.query( _config.tempNamespace , BSONObj() );
while ( cursor->more() ) {
ScopedTransaction transaction(txn, MODE_X);
Lock::GlobalWrite lock(txn->lockState()); // TODO(erh) why global?
@@ -1048,7 +1048,7 @@ namespace mongo {
BSONObj(),
&cqRaw,
whereCallback).isOK());
- std::auto_ptr<CanonicalQuery> cq(cqRaw);
+ std::unique_ptr<CanonicalQuery> cq(cqRaw);
Collection* coll = getCollectionOrUassert(ctx->getDb(), _config.incLong);
invariant(coll);
@@ -1120,7 +1120,7 @@ namespace mongo {
return;
}
- auto_ptr<InMemory> n( new InMemory() ); // for new data
+ unique_ptr<InMemory> n( new InMemory() ); // for new data
long nSize = 0;
_dupCount = 0;
@@ -1327,7 +1327,7 @@ namespace mongo {
CollectionMetadataPtr collMetadata;
// Prevent sharding state from changing during the MR.
- auto_ptr<RangePreserver> rangePreserver;
+ unique_ptr<RangePreserver> rangePreserver;
{
AutoGetCollectionForRead ctx(txn, config.ns);
@@ -1416,7 +1416,7 @@ namespace mongo {
uasserted(17238, "Can't canonicalize query " + config.filter.toString());
return 0;
}
- std::auto_ptr<CanonicalQuery> cq(cqRaw);
+ std::unique_ptr<CanonicalQuery> cq(cqRaw);
Database* db = scopedAutoDb->getDb();
Collection* coll = state.getCollectionOrUassert(db, config.ns);
diff --git a/src/mongo/db/commands/parallel_collection_scan.cpp b/src/mongo/db/commands/parallel_collection_scan.cpp
index 08b760825d6..36acc5d9bb8 100644
--- a/src/mongo/db/commands/parallel_collection_scan.cpp
+++ b/src/mongo/db/commands/parallel_collection_scan.cpp
@@ -41,7 +41,7 @@
namespace mongo {
- using std::auto_ptr;
+ using std::unique_ptr;
using std::string;
class ParallelCollectionScanCmd : public Command {
@@ -115,7 +115,7 @@ namespace mongo {
Status execStatus = PlanExecutor::make(txn, ws, mis, collection,
PlanExecutor::YIELD_AUTO, &rawExec);
invariant(execStatus.isOK());
- auto_ptr<PlanExecutor> curExec(rawExec);
+ unique_ptr<PlanExecutor> curExec(rawExec);
// The PlanExecutor was registered on construction due to the YIELD_AUTO policy.
// We have to deregister it, as it will be registered with ClientCursor.
diff --git a/src/mongo/db/commands/pipeline_command.cpp b/src/mongo/db/commands/pipeline_command.cpp
index 792442914ed..34c0c2e5976 100644
--- a/src/mongo/db/commands/pipeline_command.cpp
+++ b/src/mongo/db/commands/pipeline_command.cpp
@@ -59,7 +59,7 @@ namespace mongo {
using boost::intrusive_ptr;
using boost::scoped_ptr;
using boost::shared_ptr;
- using std::auto_ptr;
+ using std::unique_ptr;
using std::string;
using std::stringstream;
using std::endl;
@@ -219,7 +219,7 @@ namespace mongo {
PlanExecutor* exec = NULL;
scoped_ptr<ClientCursorPin> pin; // either this OR the execHolder will be non-null
- auto_ptr<PlanExecutor> execHolder;
+ unique_ptr<PlanExecutor> execHolder;
{
// This will throw if the sharding version for this connection is out of date. The
// lock must be held continuously from now until we have we created both the output
@@ -242,8 +242,8 @@ namespace mongo {
// Create the PlanExecutor which returns results from the pipeline. The WorkingSet
// ('ws') and the PipelineProxyStage ('proxy') will be owned by the created
// PlanExecutor.
- auto_ptr<WorkingSet> ws(new WorkingSet());
- auto_ptr<PipelineProxyStage> proxy(
+ unique_ptr<WorkingSet> ws(new WorkingSet());
+ unique_ptr<PipelineProxyStage> proxy(
new PipelineProxyStage(pPipeline, input, ws.get()));
Status execStatus = Status::OK();
if (NULL == collection) {
diff --git a/src/mongo/db/commands/plan_cache_commands_test.cpp b/src/mongo/db/commands/plan_cache_commands_test.cpp
index dd2c0333288..69a9c461a76 100644
--- a/src/mongo/db/commands/plan_cache_commands_test.cpp
+++ b/src/mongo/db/commands/plan_cache_commands_test.cpp
@@ -47,7 +47,7 @@ using namespace mongo;
namespace {
using boost::scoped_ptr;
- using std::auto_ptr;
+ using std::unique_ptr;
using std::string;
using std::vector;
@@ -98,7 +98,7 @@ namespace {
* Utility function to create a SolutionCacheData
*/
SolutionCacheData* createSolutionCacheData() {
- auto_ptr<SolutionCacheData> scd(new SolutionCacheData());
+ unique_ptr<SolutionCacheData> scd(new SolutionCacheData());
scd->tree.reset(new PlanCacheIndexTree());
return scd.release();
}
@@ -107,10 +107,10 @@ namespace {
* Utility function to create a PlanRankingDecision
*/
PlanRankingDecision* createDecision(size_t numPlans) {
- auto_ptr<PlanRankingDecision> why(new PlanRankingDecision());
+ unique_ptr<PlanRankingDecision> why(new PlanRankingDecision());
for (size_t i = 0; i < numPlans; ++i) {
CommonStats common("COLLSCAN");
- auto_ptr<PlanStageStats> stats(new PlanStageStats(common, STAGE_COLLSCAN));
+ unique_ptr<PlanStageStats> stats(new PlanStageStats(common, STAGE_COLLSCAN));
stats->specific.reset(new CollectionScanStats());
why->stats.mutableVector().push_back(stats.release());
why->scores.push_back(0U);
@@ -129,7 +129,7 @@ namespace {
// Create a canonical query
CanonicalQuery* cqRaw;
ASSERT_OK(CanonicalQuery::canonicalize(ns, fromjson("{a: 1}"), &cqRaw));
- auto_ptr<CanonicalQuery> cq(cqRaw);
+ unique_ptr<CanonicalQuery> cq(cqRaw);
// Plan cache with one entry
PlanCache planCache;
@@ -154,7 +154,7 @@ namespace {
// Create a canonical query
CanonicalQuery* cqRaw;
ASSERT_OK(CanonicalQuery::canonicalize(ns, fromjson("{a: 1}"), &cqRaw));
- auto_ptr<CanonicalQuery> cq(cqRaw);
+ unique_ptr<CanonicalQuery> cq(cqRaw);
// Plan cache with one entry
PlanCache planCache;
@@ -261,9 +261,9 @@ namespace {
// Create 2 canonical queries.
CanonicalQuery* cqRaw;
ASSERT_OK(CanonicalQuery::canonicalize(ns, fromjson("{a: 1}"), &cqRaw));
- auto_ptr<CanonicalQuery> cqA(cqRaw);
+ unique_ptr<CanonicalQuery> cqA(cqRaw);
ASSERT_OK(CanonicalQuery::canonicalize(ns, fromjson("{b: 1}"), &cqRaw));
- auto_ptr<CanonicalQuery> cqB(cqRaw);
+ unique_ptr<CanonicalQuery> cqB(cqRaw);
// Create plan cache with 2 entries.
PlanCache planCache;
@@ -378,7 +378,7 @@ namespace {
// Create a canonical query
CanonicalQuery* cqRaw;
ASSERT_OK(CanonicalQuery::canonicalize(ns, fromjson("{a: 1}"), &cqRaw));
- auto_ptr<CanonicalQuery> cq(cqRaw);
+ unique_ptr<CanonicalQuery> cq(cqRaw);
// Plan cache with one entry
PlanCache planCache;
@@ -397,7 +397,7 @@ namespace {
// Create a canonical query
CanonicalQuery* cqRaw;
ASSERT_OK(CanonicalQuery::canonicalize(ns, fromjson("{a: 1}"), &cqRaw));
- auto_ptr<CanonicalQuery> cq(cqRaw);
+ unique_ptr<CanonicalQuery> cq(cqRaw);
// Plan cache with one entry
PlanCache planCache;
diff --git a/src/mongo/db/commands/repair_cursor.cpp b/src/mongo/db/commands/repair_cursor.cpp
index 8828bdcdae0..0598b67b9c3 100644
--- a/src/mongo/db/commands/repair_cursor.cpp
+++ b/src/mongo/db/commands/repair_cursor.cpp
@@ -87,8 +87,8 @@ namespace mongo {
"repair iterator not supported"));
}
- std::auto_ptr<WorkingSet> ws(new WorkingSet());
- std::auto_ptr<MultiIteratorStage> stage(new MultiIteratorStage(txn, ws.get(),
+ std::unique_ptr<WorkingSet> ws(new WorkingSet());
+ std::unique_ptr<MultiIteratorStage> stage(new MultiIteratorStage(txn, ws.get(),
collection));
stage->addIterator(std::move(cursor));
@@ -100,7 +100,7 @@ namespace mongo {
PlanExecutor::YIELD_AUTO,
&rawExec);
invariant(execStatus.isOK());
- std::auto_ptr<PlanExecutor> exec(rawExec);
+ std::unique_ptr<PlanExecutor> exec(rawExec);
// 'exec' will be used in getMore(). It was automatically registered on construction
// due to the auto yield policy, so it could yield during plan selection. We deregister
diff --git a/src/mongo/db/commands/write_commands/batch_executor.cpp b/src/mongo/db/commands/write_commands/batch_executor.cpp
index 68150b76c81..f13689c1bf1 100644
--- a/src/mongo/db/commands/write_commands/batch_executor.cpp
+++ b/src/mongo/db/commands/write_commands/batch_executor.cpp
@@ -85,7 +85,7 @@
namespace mongo {
using boost::scoped_ptr;
- using std::auto_ptr;
+ using std::unique_ptr;
using std::endl;
using std::string;
using std::vector;
@@ -108,7 +108,7 @@ namespace mongo {
private:
WriteOpStats _stats;
- std::auto_ptr<WriteErrorDetail> _error;
+ std::unique_ptr<WriteErrorDetail> _error;
};
} // namespace
@@ -247,7 +247,7 @@ namespace mongo {
// OR if something succeeded and we're unordered.
//
- auto_ptr<WCErrorDetail> wcError;
+ unique_ptr<WCErrorDetail> wcError;
bool needToEnforceWC = writeErrors.empty()
|| ( !request.getOrdered()
&& writeErrors.size() < request.sizeWriteOps() );