Diffstat (limited to 'src/mongo/db')
49 files changed, 250 insertions, 194 deletions
diff --git a/src/mongo/db/auth/role_graph_builtin_roles.cpp b/src/mongo/db/auth/role_graph_builtin_roles.cpp
index 81f06ce0fe0..f86a2981d0a 100644
--- a/src/mongo/db/auth/role_graph_builtin_roles.cpp
+++ b/src/mongo/db/auth/role_graph_builtin_roles.cpp
@@ -131,20 +131,23 @@ MONGO_INITIALIZER(AuthorizationBuiltinRoles)(InitializerContext* context) {

     // DB admin role
-    dbAdminRoleActions
-        << ActionType::collMod << ActionType::collStats  // clusterMonitor gets this also
-        << ActionType::compact << ActionType::convertToCapped  // read_write gets this also
-        << ActionType::createCollection  // read_write gets this also
-        << ActionType::dbStats  // clusterMonitor gets this also
-        << ActionType::dropCollection
-        << ActionType::
-               dropDatabase  // clusterAdmin gets this also TODO(spencer): should readWriteAnyDatabase?
-        << ActionType::dropIndex << ActionType::createIndex << ActionType::indexStats
-        << ActionType::enableProfiler << ActionType::listCollections << ActionType::listIndexes
-        << ActionType::planCacheIndexFilter << ActionType::planCacheRead
-        << ActionType::planCacheWrite << ActionType::reIndex
-        << ActionType::renameCollectionSameDB  // read_write gets this also
-        << ActionType::repairDatabase << ActionType::storageDetails << ActionType::validate;
+    dbAdminRoleActions << ActionType::collMod
+                       << ActionType::collStats  // clusterMonitor gets this also
+                       << ActionType::compact
+                       << ActionType::convertToCapped  // read_write gets this also
+                       << ActionType::createCollection  // read_write gets this also
+                       << ActionType::dbStats  // clusterMonitor gets this also
+                       << ActionType::dropCollection
+                       // clusterAdmin gets this also TODO(spencer): should readWriteAnyDatabase?
+                       << ActionType::dropDatabase << ActionType::dropIndex
+                       << ActionType::createIndex << ActionType::indexStats
+                       << ActionType::enableProfiler << ActionType::listCollections
+                       << ActionType::listIndexes << ActionType::planCacheIndexFilter
+                       << ActionType::planCacheRead << ActionType::planCacheWrite
+                       << ActionType::reIndex
+                       << ActionType::renameCollectionSameDB  // read_write gets this also
+                       << ActionType::repairDatabase << ActionType::storageDetails
+                       << ActionType::validate;

     // clusterMonitor role actions that target the cluster resource
     clusterMonitorRoleClusterActions
diff --git a/src/mongo/db/client.cpp b/src/mongo/db/client.cpp
index 32931f81652..b357c317c33 100644
--- a/src/mongo/db/client.cpp
+++ b/src/mongo/db/client.cpp
@@ -280,7 +280,8 @@ Client::WriteContext::WriteContext(OperationContext* opCtx, const std::string& n
 void Client::Context::checkNotStale() const {
     switch (_client->_curOp->getOp()) {
         case dbGetMore:  // getMore's are special and should be handled else where
-        case dbUpdate:  // update & delete check shard version in instance.cpp, so don't check here as well
+        // update & delete check shard version in instance.cpp, so don't check here as well
+        case dbUpdate:
         case dbDelete:
             break;
         default: { ensureShardVersionOKOrThrow(_ns); }
diff --git a/src/mongo/db/commands/index_filter_commands_test.cpp b/src/mongo/db/commands/index_filter_commands_test.cpp
index d55cd57aaad..b4a032522cf 100644
--- a/src/mongo/db/commands/index_filter_commands_test.cpp
+++ b/src/mongo/db/commands/index_filter_commands_test.cpp
@@ -159,7 +159,8 @@ bool planCacheContains(const PlanCache& planCache,
         PlanCacheEntry* entry = *i;

         // Canonicalizing query shape in cache entry to get cache key.
-        // Alternatively, we could add key to PlanCacheEntry but that would be used in one place only.
+        // Alternatively, we could add key to PlanCacheEntry but that would be used in one place
+        // only.
         ASSERT_OK(
             CanonicalQuery::canonicalize(ns, entry->query, entry->sort, entry->projection, &cqRaw));
         scoped_ptr<CanonicalQuery> currentQuery(cqRaw);
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index fd4e72bd7cd..716f832a1ed 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -758,7 +758,8 @@ void State::init() {
     _scope->invoke(init, 0, 0, 0, true);

     // js function to run reduce on all keys
-    // redfunc = _scope->createFunction("for (var key in hashmap) { print('Key is ' + key); list = hashmap[key]; ret = reduce(key, list); print('Value is ' + ret); };");
+    // redfunc = _scope->createFunction("for (var key in hashmap) { print('Key is ' + key); list =
+    // hashmap[key]; ret = reduce(key, list); print('Value is ' + ret); };");
     _reduceAll = _scope->createFunction(
         "var map = _mrMap;"
         "var list, ret;"
@@ -1570,7 +1571,8 @@ public:
         }

         // fetch result from other shards 1 chunk at a time
-        // it would be better to do just one big $or query, but then the sorting would not be efficient
+        // it would be better to do just one big $or query, but then the sorting would not be
+        // efficient
         string shardName = shardingState.getShardName();
         DBConfigPtr confOut = grid.getDBConfig(dbname, false);
diff --git a/src/mongo/db/db.cpp b/src/mongo/db/db.cpp
index ce95ef707d6..18e3f4fa383 100644
--- a/src/mongo/db/db.cpp
+++ b/src/mongo/db/db.cpp
@@ -144,7 +144,8 @@ Timer startupSrandTimer;

 QueryResult::View emptyMoreResult(long long);

-/* todo: make this a real test. the stuff in dbtests/ seem to do all dbdirectclient which exhaust doesn't support yet. */
+/* todo: make this a real test. the stuff in dbtests/ seem to do all
+ * dbdirectclient which exhaust doesn't support yet. */
 // QueryOption_Exhaust
 #define TESTEXHAUST 0
 #if (TESTEXHAUST)
diff --git a/src/mongo/db/dbcommands.cpp b/src/mongo/db/dbcommands.cpp
index 3f2241126c4..d9749c57b7c 100644
--- a/src/mongo/db/dbcommands.cpp
+++ b/src/mongo/db/dbcommands.cpp
@@ -1099,7 +1099,8 @@ public:
             if (str::equals("collMod", e.fieldName())) {
                 // no-op
             } else if (str::startsWith(e.fieldName(), "$")) {
-                // no-op: ignore top-level fields prefixed with $. They are for the command processor.
+                // no-op: ignore top-level fields prefixed with $. They are for the command
+                // processor.
             } else if (LiteParsedQuery::cmdOptionMaxTimeMS == e.fieldNameStringData()) {
                 // no-op
             } else if (str::equals("index", e.fieldName())) {
@@ -1359,7 +1360,8 @@ bool _execCommand(OperationContext* txn,
         LOG(1) << "command failed because of stale config, can retry" << causedBy(e) << endl;
         throw;
     } catch (DBException& e) {
-        // TODO: Rethrown errors have issues here, should divorce SendStaleConfigException from the DBException tree
+        // TODO: Rethrown errors have issues here, should divorce SendStaleConfigException from the
+        // DBException tree

         stringstream ss;
         ss << "exception: " << e.what();
diff --git a/src/mongo/db/dbcommands_generic.cpp b/src/mongo/db/dbcommands_generic.cpp
index e9b21758842..482147903c3 100644
--- a/src/mongo/db/dbcommands_generic.cpp
+++ b/src/mongo/db/dbcommands_generic.cpp
@@ -72,56 +72,58 @@ using std::stringstream;
 using std::vector;

 #if 0
-    namespace cloud {
-        SimpleMutex mtx("cloud");
-        Guarded< vector<string>, mtx > ips;
-        bool startedThread = false;
-
-        void thread() {
-            bson::bo cmd;
-            while( 1 ) {
-                list<Target> L;
-                {
-                    SimpleMutex::scoped_lock lk(mtx);
-                    if( ips.ref(lk).empty() )
-                        continue;
-                    for( unsigned i = 0; i < ips.ref(lk).size(); i++ ) {
-                        L.push_back( Target(ips.ref(lk)[i]) );
-                    }
+namespace cloud {
+    SimpleMutex mtx("cloud");
+    Guarded< vector<string>, mtx > ips;
+    bool startedThread = false;
+
+    void thread() {
+        bson::bo cmd;
+        while( 1 ) {
+            list<Target> L;
+            {
+                SimpleMutex::scoped_lock lk(mtx);
+                if( ips.ref(lk).empty() )
+                    continue;
+                for( unsigned i = 0; i < ips.ref(lk).size(); i++ ) {
+                    L.push_back( Target(ips.ref(lk)[i]) );
                 }
+            }

-                /** repoll as machines might be down on the first lookup (only if not found previously) */
-                sleepsecs(6);
-            }
+            /** repoll as machines might be down on the first lookup (only if not found previously)
+             * */
+            sleepsecs(6);
         }
     }
+}

-    class CmdCloud : public Command {
-    public:
-        CmdCloud() : Command( "cloud" ) { }
-        virtual bool slaveOk() const { return true; }
-        virtual bool adminOnly() const { return true; }
-        virtual bool isWriteCommandForConfigServer() const { return false; }
-        virtual void help( stringstream &help ) const {
-            help << "internal command facilitating running in certain cloud computing environments";
-        }
-        bool run(OperationContext* txn, const string& dbname, BSONObj& obj, int options, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
-            if( !obj.hasElement("servers") ) {
-                vector<string> ips;
-                obj["servers"].Obj().Vals(ips);
-                {
-                    SimpleMutex::scoped_lock lk(cloud::mtx);
-                    cloud::ips.ref(lk).swap(ips);
-                    if( !cloud::startedThread ) {
-                        cloud::startedThread = true;
-                        boost::thread thr(cloud::thread);
-                    }
+class CmdCloud : public Command {
+public:
+    CmdCloud() : Command( "cloud" ) { }
+    virtual bool slaveOk() const { return true; }
+    virtual bool adminOnly() const { return true; }
+    virtual bool isWriteCommandForConfigServer() const { return false; }
+    virtual void help( stringstream &help ) const {
+        help << "internal command facilitating running in certain cloud computing environments";
+    }
+    bool run(OperationContext* txn, const string& dbname, BSONObj& obj, int options,
+             string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
+        if( !obj.hasElement("servers") ) {
+            vector<string> ips;
+            obj["servers"].Obj().Vals(ips);
+            {
+                SimpleMutex::scoped_lock lk(cloud::mtx);
+                cloud::ips.ref(lk).swap(ips);
+                if( !cloud::startedThread ) {
+                    cloud::startedThread = true;
+                    boost::thread thr(cloud::thread);
                 }
             }
-            return true;
         }
-    } cmdCloud;
+        return true;
+    }
+} cmdCloud;
 #endif

 class CmdBuildInfo : public Command {
diff --git a/src/mongo/db/dbhelpers.h b/src/mongo/db/dbhelpers.h
index ed9b1144cd2..f7e1d4e776c 100644
--- a/src/mongo/db/dbhelpers.h
+++ b/src/mongo/db/dbhelpers.h
@@ -73,8 +73,8 @@ struct Helpers {
     /* fetch a single object from collection ns that matches query.
        set your db SavedContext first.

-       @param query - the query to perform.  note this is the low level portion of query so "orderby : ..."
-                      won't work.
+       @param query - the query to perform.  note this is the low level portion of query so "orderby
+       : ..." won't work.

        @param requireIndex if true, assert if no index for the query.  a way to guard against
        writing a slow query.
diff --git a/src/mongo/db/dbmessage.h b/src/mongo/db/dbmessage.h
index 7e17c6235b6..6eacae1ec8d 100644
--- a/src/mongo/db/dbmessage.h
+++ b/src/mongo/db/dbmessage.h
@@ -74,8 +74,8 @@ namespace mongo {
       std::string collection;
       int nToSkip;
       int nToReturn;  // how many you want back as the beginning of the cursor data (0=no limit)
-                      // greater than zero is simply a hint on how many objects to send back per "cursor batch".
-                      // a negative number indicates a hard limit.
+                      // greater than zero is simply a hint on how many objects to send back per
+                      // "cursor batch". a negative number indicates a hard limit.
       JSObject query;
       [JSObject fieldsToReturn]
    dbGetMore:
diff --git a/src/mongo/db/geo/s2.h b/src/mongo/db/geo/s2.h
index 7a3a1c6a840..fecf9d22361 100644
--- a/src/mongo/db/geo/s2.h
+++ b/src/mongo/db/geo/s2.h
@@ -29,8 +29,8 @@
 #pragma once

 /*
- * This file's purpose is to confine the suppression of the Clang warning for mismatched-tags (struct vs class)
- * in only the s2.h file
+ * This file's purpose is to confine the suppression of the Clang warning for mismatched-tags
+ * (struct vs class) in only the s2.h file
 */

 #ifdef __clang__
diff --git a/src/mongo/db/matcher/expression_leaf.cpp b/src/mongo/db/matcher/expression_leaf.cpp
index 8f3a1c97e1e..2316ef278b5 100644
--- a/src/mongo/db/matcher/expression_leaf.cpp
+++ b/src/mongo/db/matcher/expression_leaf.cpp
@@ -277,7 +277,8 @@ Status RegexMatchExpression::init(const StringData& path,
 }

 bool RegexMatchExpression::matchesSingleElement(const BSONElement& e) const {
-    // log() << "RegexMatchExpression::matchesSingleElement _regex: " << _regex << " e: " << e << std::endl;
+    // log() << "RegexMatchExpression::matchesSingleElement _regex: " << _regex << " e: " << e <<
+    // std::endl;
     switch (e.type()) {
         case String:
         case Symbol:
diff --git a/src/mongo/db/mongod_options.cpp b/src/mongo/db/mongod_options.cpp
index 6cbf51c817b..e570d376f1a 100644
--- a/src/mongo/db/mongod_options.cpp
+++ b/src/mongo/db/mongod_options.cpp
@@ -790,8 +790,8 @@ Status canonicalizeMongodOptions(moe::Environment* params) {
         }
     }

-    // "storage.mmapv1.preallocDataFiles" comes from the config file, so override it if "noprealloc" is
-    // set since that comes from the command line.
+    // "storage.mmapv1.preallocDataFiles" comes from the config file, so override it if "noprealloc"
+    // is set since that comes from the command line.
     if (params->count("noprealloc")) {
         Status ret = params->set("storage.mmapv1.preallocDataFiles",
                                  moe::Value(!(*params)["noprealloc"].as<bool>()));
diff --git a/src/mongo/db/namespace_string.h b/src/mongo/db/namespace_string.h
index 3bce52d3899..8b59b0dfbdc 100644
--- a/src/mongo/db/namespace_string.h
+++ b/src/mongo/db/namespace_string.h
@@ -143,7 +143,8 @@ public:
     NamespaceString getTargetNSForListIndexesGetMore() const;

     /**
-     * @return true if the namespace is valid. Special namespaces for internal use are considered as valid.
+     * @return true if the namespace is valid. Special namespaces for internal use are considered as
+     * valid.
      */
     bool isValid() const {
         return validDBName(db()) && !coll().empty();
diff --git a/src/mongo/db/prefetch.cpp b/src/mongo/db/prefetch.cpp
index e5b846fb3aa..47a486eb5f2 100644
--- a/src/mongo/db/prefetch.cpp
+++ b/src/mongo/db/prefetch.cpp
@@ -53,8 +53,8 @@ using std::string;
 namespace repl {
 namespace {

-// todo / idea: the prefetcher, when it fetches _id, on an upsert, will see if the record exists. if it does not,
-// at write time, we can just do an insert, which will be faster.
+// todo / idea: the prefetcher, when it fetches _id, on an upsert, will see if the record exists. if
+// it does not, at write time, we can just do an insert, which will be faster.

 // The count (of batches) and time spent fetching pages before application
 //    -- meaning depends on the prefetch behavior: all, _id index, none, etc.)
@@ -69,8 +69,9 @@ void prefetchIndexPages(OperationContext* txn,
                         Collection* collection,
                         const BackgroundSync::IndexPrefetchConfig& prefetchConfig,
                         const BSONObj& obj) {
-    // do we want prefetchConfig to be (1) as-is, (2) for update ops only, or (3) configured per op type?
-    // One might want PREFETCH_NONE for updates, but it's more rare that it is a bad idea for inserts.
+    // do we want prefetchConfig to be (1) as-is, (2) for update ops only, or (3) configured per op
+    // type? One might want PREFETCH_NONE for updates, but it's more rare that it is a bad idea for
+    // inserts.
     // #3 (per op), a big issue would be "too many knobs".
     switch (prefetchConfig) {
         case BackgroundSync::PREFETCH_NONE:
diff --git a/src/mongo/db/query/get_executor.cpp b/src/mongo/db/query/get_executor.cpp
index ade91aabbb9..1d2cf5164eb 100644
--- a/src/mongo/db/query/get_executor.cpp
+++ b/src/mongo/db/query/get_executor.cpp
@@ -883,8 +883,8 @@ std::string getProjectedDottedField(const std::string& field, bool* isIDOut) {
             // Generate prefix of field up to (but not including) array index.
             std::vector<std::string> prefixStrings(res);
             prefixStrings.resize(i);
-            // Reset projectedField. Instead of overwriting, joinStringDelim() appends joined string
-            // to the end of projectedField.
+            // Reset projectedField. Instead of overwriting, joinStringDelim() appends joined
+            // string to the end of projectedField.
             std::string projectedField;
             mongo::joinStringDelim(prefixStrings, &projectedField, '.');
             return projectedField;
diff --git a/src/mongo/db/query/planner_access.h b/src/mongo/db/query/planner_access.h
index 1503a3eecb0..885f0e7d7a6 100644
--- a/src/mongo/db/query/planner_access.h
+++ b/src/mongo/db/query/planner_access.h
@@ -218,8 +218,8 @@ public:
     //    a filter on the entire tree.
     // 2. No fetches performed.  There will be a final fetch by the caller of buildIndexedDataAccess
     //    who set the value of inArrayOperator to true.
-    // 3. No compound indices are used and no bounds are combined.  These are incorrect in the context
-    //    of these operators.
+    // 3. No compound indices are used and no bounds are combined.  These are incorrect in the
+    //    context of these operators.
     //

     /**
diff --git a/src/mongo/db/query/query_planner_test.cpp b/src/mongo/db/query/query_planner_test.cpp
index 59e66c5485f..65c76400089 100644
--- a/src/mongo/db/query/query_planner_test.cpp
+++ b/src/mongo/db/query/query_planner_test.cpp
@@ -2177,7 +2177,8 @@ TEST_F(QueryPlannerTest, ElemMatchNestedOrNotIndexed) {

 //
 // Geo
-// http://docs.mongodb.org/manual/reference/operator/query-geospatial/#geospatial-query-compatibility-chart
+// http://docs.mongodb.org/
+// manual/reference/operator/query-geospatial/#geospatial-query-compatibility-chart
 //

 TEST_F(QueryPlannerTest, Basic2DNonNear) {
@@ -3185,15 +3186,16 @@ TEST_F(QueryPlannerTest, NoMergeSortIfNoSortWanted) {
 /*
 TEST_F(QueryPlannerTest, SortOnGeoQuery) {
     addIndex(BSON("timestamp" << -1 << "position" << "2dsphere"));
-    BSONObj query = fromjson("{position: {$geoWithin: {$geometry: {type: \"Polygon\", coordinates: [[[1, 1], [1, 90], [180, 90], [180, 1], [1, 1]]]}}}}");
-    BSONObj sort = fromjson("{timestamp: -1}");
+    BSONObj query = fromjson("{position: {$geoWithin: {$geometry: {type: \"Polygon\", coordinates:
+    [[[1, 1], [1, 90], [180, 90], [180, 1], [1, 1]]]}}}}"); BSONObj sort = fromjson("{timestamp:
+    -1}");
     runQuerySortProj(query, sort, BSONObj());

     ASSERT_EQUALS(getNumSolutions(), 2U);
     assertSolutionExists("{sort: {pattern: {timestamp: -1}, limit: 0, "
                             "node: {cscan: {dir: 1}}}}");
-    assertSolutionExists("{fetch: {node: {ixscan: {pattern: {timestamp: -1, position: '2dsphere'}}}}}");
-}
+    assertSolutionExists("{fetch: {node: {ixscan: {pattern: {timestamp: -1, position:
+    '2dsphere'}}}}}"); }

 TEST_F(QueryPlannerTest, SortOnGeoQueryMultikey) {
     // true means multikey
diff --git a/src/mongo/db/repl/bgsync.cpp b/src/mongo/db/repl/bgsync.cpp
index f1a2b36ef33..175c3a1fe9d 100644
--- a/src/mongo/db/repl/bgsync.cpp
+++ b/src/mongo/db/repl/bgsync.cpp
@@ -405,7 +405,8 @@ bool BackgroundSync::_rollbackIfNeeded(OperationContext* txn, OplogReader& r) {
                 syncRollback(txn, _replCoord->getMyLastOptime(), &r, _replCoord);
                 return true;
             }
-            /* we're not ahead?  maybe our new query got fresher data.  best to come back and try again */
+            /* we're not ahead?  maybe our new query got fresher data.  best to come back and try
+             * again */
             log() << "replSet syncTail condition 1";
             sleepsecs(1);
         } catch (DBException& e) {
diff --git a/src/mongo/db/repl/master_slave.cpp b/src/mongo/db/repl/master_slave.cpp
index 8844ae7828d..7b3457008ba 100644
--- a/src/mongo/db/repl/master_slave.cpp
+++ b/src/mongo/db/repl/master_slave.cpp
@@ -28,11 +28,13 @@

 /* Collections we use:

-   local.sources         - indicates what sources we pull from as a "slave", and the last update of each
+   local.sources         - indicates what sources we pull from as a "slave", and the last update of
+                           each
    local.oplog.$main     - our op log as "master"
    local.dbinfo.<dbname> - no longer used???
-   local.pair.startup    - [deprecated] can contain a special value indicating for a pair that we have the master copy.
-                           used when replacing other half of the pair which has permanently failed.
+   local.pair.startup    - [deprecated] can contain a special value indicating for a pair that we
+                           have the master copy. used when replacing other half of the pair which
+                           has permanently failed.
    local.pair.sync       - [deprecated] { initialsynccomplete: 1 }
 */
@@ -667,7 +669,8 @@ void ReplSource::_sync_pullOpLog_applyOperation(OperationContext* txn,
     scoped_ptr<Lock::GlobalWrite> lk(alreadyLocked ? 0 : new Lock::GlobalWrite(txn->lockState()));

     if (replAllDead) {
-        // hmmm why is this check here and not at top of this function? does it get set between top and here?
+        // hmmm why is this check here and not at top of this function? does it get set between top
+        // and here?
         log() << "replAllDead, throwing SyncException: " << replAllDead << endl;
         throw SyncException();
     }
@@ -926,7 +929,8 @@ int ReplSource::_sync_pullOpLog(OperationContext* txn, int& nApplied) {
                 verify(syncedTo < nextOpTime);
                 throw SyncException();
             } else {
-                /* t == syncedTo, so the first op was applied previously or it is the first op of initial query and need not be applied. */
+                /* t == syncedTo, so the first op was applied previously or it is the first op of
+                 * initial query and need not be applied. */
             }
         }
@@ -1048,7 +1052,8 @@ int ReplSource::sync(OperationContext* txn, int& nApplied) {
     }

     nClonedThisPass = 0;

-    // FIXME Handle cases where this db isn't on default port, or default port is spec'd in hostName.
+    // FIXME Handle cases where this db isn't on default port, or default port is spec'd in
+    // hostName.
     if ((string("localhost") == hostName || string("127.0.0.1") == hostName) &&
         serverGlobalParams.port == ServerGlobalParams::DefaultDBPort) {
         log() << "repl:   can't sync from self (localhost). sources configuration may be wrong."
@@ -1157,9 +1162,8 @@ static void replMain(OperationContext* txn) {
                     break;
                 }
             }
-            verify(
-                syncing ==
-                0);  // i.e., there is only one sync thread running. we will want to change/fix this.
+            // i.e., there is only one sync thread running. we will want to change/fix this.
+            verify(syncing == 0);
             syncing++;
         }
@@ -1337,7 +1341,8 @@ void pretouchN(vector<BSONObj>& v, unsigned a, unsigned b) {

 void pretouchOperation(OperationContext* txn, const BSONObj& op) {
     if (txn->lockState()->isWriteLocked()) {
-        return;  // no point pretouching if write locked. not sure if this will ever fire, but just in case.
+        // no point pretouching if write locked. not sure if this will ever fire, but just in case.
+        return;
     }

     const char* which = "o";
diff --git a/src/mongo/db/repl/master_slave.h b/src/mongo/db/repl/master_slave.h
index 74e509302f7..f0aee54e8d0 100644
--- a/src/mongo/db/repl/master_slave.h
+++ b/src/mongo/db/repl/master_slave.h
@@ -35,7 +35,8 @@

 /* replication data overview at the slave:

-      local.sources { host: ..., source: ..., only: ..., syncedTo: ..., localLogTs: ..., dbsNextPass: { ... }, incompleteCloneDbs: { ... } }
+      local.sources { host: ..., source: ..., only: ..., syncedTo: ..., localLogTs: ..., dbsNextPass:
+      { ... }, incompleteCloneDbs: { ... } }

    at the master:
       local.oplog.$<source>
@@ -71,7 +72,8 @@ public:

        Can be a group of things to replicate for several databases.

-          { host: ..., source: ..., only: ..., syncedTo: ..., dbsNextPass: { ... }, incompleteCloneDbs: { ... } }
+          { host: ..., source: ..., only: ..., syncedTo: ..., dbsNextPass: { ... }, incompleteCloneDbs:
+          { ... } }

        'source' defaults to 'main'; support for multiple source names is
        not done (always use main for now).
@@ -137,8 +139,9 @@ public:
     std::string sourceName() const {
         return _sourceName.empty() ? "main" : _sourceName;
     }

-    std::string
-        only;  // only a certain db. note that in the sources collection, this may not be changed once you start replicating.
+    // only a certain db. note that in the sources collection, this may not be changed once you
+    // start replicating.
+    std::string only;

     /* the last time point we have already synced up to (in the remote/master's oplog). */
     OpTime syncedTo;
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index df2c04b8e22..1d131d0af1e 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -510,7 +510,8 @@ void createOplog(OperationContext* txn) {
 // -------------------------------------

 /** @param fromRepl false if from ApplyOpsCmd
-    @return true if was and update should have happened and the document DNE. see replset initial sync code.
+    @return true if was and update should have happened and the document DNE.
+    see replset initial sync code.
 */
 bool applyOperation_inlock(OperationContext* txn,
                            Database* db,
@@ -602,8 +603,8 @@ bool applyOperation_inlock(OperationContext* txn,
                            << "warning, repl doing slow updates (no _id field) for " << ns << endl;
                 }
             } else {
-                /* todo : it may be better to do an insert here, and then catch the dup key exception and do update
-                   then.  very few upserts will not be inserts...
+                /* todo : it may be better to do an insert here, and then catch the dup key
+                 * exception and do update then.  very few upserts will not be inserts...
                 */
                 BSONObjBuilder b;
                 b.append(_id);
@@ -648,8 +649,8 @@ bool applyOperation_inlock(OperationContext* txn,
                     log() << "replication failed to apply update: " << op.toString() << endl;
                 }
                 // need to check to see if it isn't present so we can set failedUpdate correctly.
-                // note that adds some overhead for this extra check in some cases, such as an updateCriteria
-                // of the form
+                // note that adds some overhead for this extra check in some cases, such as an
+                // updateCriteria of the form
                 //   { _id:..., { x : {$size:...} }
                 // thus this is not ideal.
                 else {
@@ -663,8 +664,8 @@ bool applyOperation_inlock(OperationContext* txn,
                         log() << "replication couldn't find doc: " << op.toString() << endl;

-                    // Otherwise, it's present; zero objects were updated because of additional specifiers
-                    // in the query for idempotence
+                    // Otherwise, it's present; zero objects were updated because of additional
+                    // specifiers in the query for idempotence
                 }
             } else {
                 // this could happen benignly on an oplog duplicate replay of an upsert
diff --git a/src/mongo/db/repl/repl_settings.h b/src/mongo/db/repl/repl_settings.h
index 5c1e6032acc..6ba6d9dbd44 100644
--- a/src/mongo/db/repl/repl_settings.h
+++ b/src/mongo/db/repl/repl_settings.h
@@ -51,7 +51,8 @@ class ReplSettings {
 public:
     SlaveTypes slave;

-    /** true means we are master and doing replication.  if we are not writing to oplog, this won't be true. */
+    /** true means we are master and doing replication.  if we are not writing to oplog, this won't
+     * be true. */
     bool master;

     bool fastsync;
diff --git a/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp
index 079f6e2227a..b1114bd9d1f 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp
@@ -521,7 +521,7 @@ TEST_F(ReplCoordTest, ReconfigDuringHBReconfigFails) {
 //    net->exitNetwork();
 //    stopCapturingLogMessages();
 //    ASSERT_EQUALS(1,
-//                  countLogLinesContaining("because already in the midst of a configuration process"));
+//        countLogLinesContaining("because already in the midst of a configuration process"));
 //    reconfigThread.join();
 //    logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Log());
 // }
diff --git a/src/mongo/db/repl/replication_info.cpp b/src/mongo/db/repl/replication_info.cpp
index 72449c7c6a5..be4caa5e2bf 100644
--- a/src/mongo/db/repl/replication_info.cpp
+++ b/src/mongo/db/repl/replication_info.cpp
@@ -112,7 +112,8 @@ void appendReplicationInfo(OperationContext* txn, BSONObjBuilder& result, int le

                 if (level > 1) {
                     wassert(!txn->lockState()->isLocked());
-                    // note: there is no so-style timeout on this connection; perhaps we should have one.
+                    // note: there is no so-style timeout on this connection; perhaps we should have
+                    // one.
                     ScopedDbConnection conn(s["host"].valuestr());

                     DBClientConnection* cliConn = dynamic_cast<DBClientConnection*>(&conn.conn());
diff --git a/src/mongo/db/repl/scoped_conn.h b/src/mongo/db/repl/scoped_conn.h
index 85f6dd5080f..80cc1810250 100644
--- a/src/mongo/db/repl/scoped_conn.h
+++ b/src/mongo/db/repl/scoped_conn.h
@@ -78,9 +78,10 @@ public:
         connInfo->setTimeout(timeout);
     }

-    /* If we were to run a query and not exhaust the cursor, future use of the connection would be problematic.
-       So here what we do is wrapper known safe methods and not allow cursor-style queries at all.  This makes
-       ScopedConn limited in functionality but very safe.  More non-cursor wrappers can be added here if needed.
+    /* If we were to run a query and not exhaust the cursor, future use of the connection would be
+     * problematic. So here what we do is wrapper known safe methods and not allow cursor-style
+     * queries at all. This makes ScopedConn limited in functionality but very safe. More
+     * non-cursor wrappers can be added here if needed.
     */
     bool runCommand(const std::string& dbname, const BSONObj& cmd, BSONObj& info, int options = 0) {
         return conn()->runCommand(dbname, cmd, info, options);
diff --git a/src/mongo/db/storage/mmap_v1/aligned_builder.cpp b/src/mongo/db/storage/mmap_v1/aligned_builder.cpp
index 8742f25e285..bee3fb4f86a 100644
--- a/src/mongo/db/storage/mmap_v1/aligned_builder.cpp
+++ b/src/mongo/db/storage/mmap_v1/aligned_builder.cpp
@@ -134,8 +134,8 @@ void AlignedBuilder::_malloc(unsigned sz) {
     _p._allocationAddress = p;
     _p._data = (char*)p;
 #elif defined(__linux__)
-    // in theory #ifdef _POSIX_VERSION should work, but it doesn't on OS X 10.4, and needs to be tested on solaris.
-    // so for now, linux only for this.
+    // in theory #ifdef _POSIX_VERSION should work, but it doesn't on OS X 10.4, and needs to be
+    // tested on solaris. so for now, linux only for this.
     void* p = 0;
     int res = posix_memalign(&p, Alignment, sz);
     massert(13524, "out of memory AlignedBuilder", res == 0);
diff --git a/src/mongo/db/storage/mmap_v1/btree/btree_logic_test.cpp b/src/mongo/db/storage/mmap_v1/btree/btree_logic_test.cpp
index 317ab5919cd..55acad840af 100644
--- a/src/mongo/db/storage/mmap_v1/btree/btree_logic_test.cpp
+++ b/src/mongo/db/storage/mmap_v1/btree/btree_logic_test.cpp
@@ -2382,10 +2382,10 @@ public:
         }
         // too much work to try to make this happen through inserts and deletes
         // we are intentionally manipulating the btree bucket directly here
-        BtreeBucket::Loc* L = const_cast< BtreeBucket::Loc* >( &bt()->keyNode( 1 ).prevChildBucket );
-        writing(L)->Null();
-        writingInt( const_cast< BtreeBucket::Loc& >( bt()->keyNode( 1 ).recordLoc ).GETOFS() ) |= 1; // make unused
-        BSONObj k = BSON( "a" << toInsert );
+        BtreeBucket::Loc* L = const_cast< BtreeBucket::Loc* >( &bt()->keyNode( 1
+        ).prevChildBucket ); writing(L)->Null();
+        writingInt( const_cast< BtreeBucket::Loc& >( bt()->keyNode( 1 ).recordLoc ).GETOFS() )
+        |= 1; // make unused BSONObj k = BSON( "a" << toInsert );
         Base::insert( k );
     }
 };
diff --git a/src/mongo/db/storage/mmap_v1/btree/key.cpp b/src/mongo/db/storage/mmap_v1/btree/key.cpp
index baa934c525f..915171d4b0e 100644
--- a/src/mongo/db/storage/mmap_v1/btree/key.cpp
+++ b/src/mongo/db/storage/mmap_v1/btree/key.cpp
@@ -197,7 +197,8 @@ int KeyBson::woCompare(const KeyBson& r, const Ordering& o) const {
     return oldCompare(_o, r._o, o);
 }

-// woEqual could be made faster than woCompare but this is for backward compatibility so not worth a big effort
+// woEqual could be made faster than woCompare but this is for backward compatibility so not worth a
+// big effort
 bool KeyBson::woEqual(const KeyBson& r) const {
     return oldCompare(_o, r._o, nullOrdering) == 0;
 }
@@ -495,7 +496,8 @@ static int compare(const unsigned char*& l, const unsigned char*& r) {
             int llen = binDataCodeToLength(L);
             int diff = L - R;  // checks length and subtype simultaneously
             if (diff) {
-                // unfortunately nibbles are backwards to do subtype and len in one check (could bit swap...)
+                // unfortunately nibbles are backwards to do subtype and len in one check (could bit
+                // swap...)
                 int rlen = binDataCodeToLength(R);
                 if (llen != rlen)
                     return llen - rlen;
diff --git a/src/mongo/db/storage/mmap_v1/btree/key.h b/src/mongo/db/storage/mmap_v1/btree/key.h
index 4787d83281a..d6546a76d77 100644
--- a/src/mongo/db/storage/mmap_v1/btree/key.h
+++ b/src/mongo/db/storage/mmap_v1/btree/key.h
@@ -83,8 +83,9 @@ class KeyV1Owned;

 class KeyV1 {
     void operator=( const KeyV1&);  // disallowed just to make people be careful as we don't own the buffer
-    KeyV1(
-        const KeyV1Owned&);  // disallowed as this is not a great idea as KeyV1Owned likely will go out of scope
+    // disallowed as this is not a great idea as KeyV1Owned likely will go out of scope
+    KeyV1(const KeyV1Owned&);
+
 public:
     KeyV1() {
         _keyData = 0;
diff --git a/src/mongo/db/storage/mmap_v1/catalog/namespace.h b/src/mongo/db/storage/mmap_v1/catalog/namespace.h
index d24f576bb01..d854eeff989 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/namespace.h
+++ b/src/mongo/db/storage/mmap_v1/catalog/namespace.h
@@ -84,12 +84,12 @@ public:
         return buf;
     }

-    /* NamespaceDetails::Extra was added after fact to allow chaining of data blocks to support more than 10 indexes
-       (more than 10 IndexDetails).  It's a bit hacky because of this late addition with backward
-       file support. */
+    /* NamespaceDetails::Extra was added after fact to allow chaining of data blocks to support more
+     * than 10 indexes (more than 10 IndexDetails).  It's a bit hacky because of this late addition
+     * with backward file support. */
     std::string extraName(int i) const;

-    bool isExtra()
-        const; /* ends with $extr... -- when true an extra block not a normal NamespaceDetails block */
+    /* ends with $extr... -- when true an extra block not a normal NamespaceDetails block */
+    bool isExtra() const;

     enum MaxNsLenValue {
         // Maximum possible length of name any namespace, including special ones like $extra.
diff --git a/src/mongo/db/storage/mmap_v1/catalog/namespace_details.h b/src/mongo/db/storage/mmap_v1/catalog/namespace_details.h
index 42ca38a36e2..1aee8f9ad1a 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/namespace_details.h
+++ b/src/mongo/db/storage/mmap_v1/catalog/namespace_details.h
@@ -97,8 +97,8 @@ public:
     DiskLoc capExtent;  // the "current" extent we're writing too for a capped collection
     DiskLoc capFirstNewRecord;

-    unsigned short
-        _dataFileVersion;  // NamespaceDetails version.  So we can do backward compatibility in the future. See filever.h
+    // NamespaceDetails version.  So we can do backward compatibility in the future. See filever.h
+    unsigned short _dataFileVersion;
     unsigned short _indexFileVersion;

     unsigned long long multiKeyIndexBits;
diff --git a/src/mongo/db/storage/mmap_v1/data_file.h b/src/mongo/db/storage/mmap_v1/data_file.h
index 64df4ebdd61..a78ac911c5b 100644
--- a/src/mongo/db/storage/mmap_v1/data_file.h
+++ b/src/mongo/db/storage/mmap_v1/data_file.h
@@ -126,8 +126,9 @@ class DataFileHeader {
 public:
     DataFileVersion version;
     int fileLength;
-    DiskLoc
-        unused; /* unused is the portion of the file that doesn't belong to any allocated extents. -1 = no more */
+    /* unused is the portion of the file that doesn't belong to any allocated extents. -1 = no more
+     * */
+    DiskLoc unused;
     int unusedLength;
     DiskLoc freeListStart;
     DiskLoc freeListEnd;
diff --git a/src/mongo/db/storage/mmap_v1/diskloc.h b/src/mongo/db/storage/mmap_v1/diskloc.h
index 1e4d8649022..9f49a39e5d1 100644
--- a/src/mongo/db/storage/mmap_v1/diskloc.h
+++ b/src/mongo/db/storage/mmap_v1/diskloc.h
@@ -52,12 +52,15 @@ class BtreeBucket;
    (such as adding a virtual function)
 */
 class DiskLoc {
-    int _a;  // this will be volume, file #, etc. but is a logical value could be anything depending on storage engine
+    // this will be volume, file #, etc. but is a logical value could be anything depending on
+    // storage engine
+    int _a;
     int ofs;

 public:
     enum SentinelValues {
-        /* note NullOfs is different. todo clean up.  see refs to NullOfs in code - use is valid but outside DiskLoc context so confusing as-is. */
+        /* note NullOfs is different. todo clean up. see refs to NullOfs in code - use is valid but
+         * outside DiskLoc context so confusing as-is. */
         NullOfs = -1,

         // Caps the number of files that may be allocated in a database, allowing about 32TB of
@@ -96,8 +99,9 @@ public:
     }
     DiskLoc& Null() {
         _a = -1;
-        ofs =
-            0; /* note NullOfs is different. todo clean up.  see refs to NullOfs in code - use is valid but outside DiskLoc context so confusing as-is. */
+        /* note NullOfs is different. todo clean up. see refs to NullOfs in code - use is valid but
+         * outside DiskLoc context so confusing as-is. */
+        ofs = 0;
         return *this;
     }
     void assertOk() const {
diff --git a/src/mongo/db/storage/mmap_v1/dur.cpp b/src/mongo/db/storage/mmap_v1/dur.cpp
index fc0bcdf84c2..cfc5c72db05 100644
--- a/src/mongo/db/storage/mmap_v1/dur.cpp
+++ b/src/mongo/db/storage/mmap_v1/dur.cpp
@@ -34,19 +34,22 @@
      we could be in read lock for this
        for very large objects write directly to redo log in situ?
    WRITETOJOURNAL
-     we could be unlocked (the main db lock that is...) for this, with sufficient care, but there is some complexity
-     have to handle falling behind which would use too much ram (going back into a read lock would suffice to stop that).
-     for now (1.7.5/1.8.0) we are in read lock which is not ideal.
+     we could be unlocked (the main db lock that is...) for this, with sufficient care, but there
+     is some complexity have to handle falling behind which would use too much ram (going back
+     into a read lock would suffice to stop that). for now (1.7.5/1.8.0) we are in read lock which
+     is not ideal.
    WRITETODATAFILES
-     actually write to the database data files in this phase.  currently done by memcpy'ing the writes back to
-     the non-private MMF.  alternatively one could write to the files the traditional way; however the way our
-     storage engine works that isn't any faster (actually measured a tiny bit slower).
+     actually write to the database data files in this phase.  currently done by memcpy'ing the
+     writes back to the non-private MMF.  alternatively one could write to the files the
+     traditional way; however the way our storage engine works that isn't any faster (actually
+     measured a tiny bit slower).
    REMAPPRIVATEVIEW
-     we could in a write lock quickly flip readers back to the main view, then stay in read lock and do our real
-     remapping. with many files (e.g., 1000), remapping could be time consuming (several ms), so we don't want
-     to be too frequent.
-     there could be a slow down immediately after remapping as fresh copy-on-writes for commonly written pages will
-     be required.  so doing these remaps fractionally is helpful.
+     we could in a write lock quickly flip readers back to the main view, then stay in read lock
+     and do our real remapping. with many files (e.g., 1000), remapping could be time consuming
+     (several ms), so we don't want to be too frequent.
+
+     there could be a slow down immediately after remapping as fresh copy-on-writes for commonly
+     written pages will be required. so doing these remaps fractionally is helpful.

    mutexes:
diff --git a/src/mongo/db/storage/mmap_v1/dur_journal.cpp b/src/mongo/db/storage/mmap_v1/dur_journal.cpp
index c1e56b9790f..0ab2ff648ca 100644
--- a/src/mongo/db/storage/mmap_v1/dur_journal.cpp
+++ b/src/mongo/db/storage/mmap_v1/dur_journal.cpp
@@ -390,10 +390,10 @@ void _preallocateFiles() {
         unsigned long long limit = DataLimitPerJournalFile;
         if (debug && i == 1) {
-            // moving 32->64, the prealloc files would be short.  that is "ok", but we want to exercise that
-            // case, so we force exercising here when _DEBUG is set by arbitrarily stopping prealloc at a low
-            // limit for a file.  also we want to be able to change in the future the constant without a lot of
-            // work anyway.
+            // moving 32->64, the prealloc files would be short.  that is "ok", but we want to
+            // exercise that case, so we force exercising here when _DEBUG is set by arbitrarily
+            // stopping prealloc at a low limit for a file.  also we want to be able to change in
+            // the future the constant without a lot of work anyway.
             limit = 16 * 1024 * 1024;
         }
         preallocateFile(filepath, limit);
diff --git a/src/mongo/db/storage/mmap_v1/dur_journalformat.h b/src/mongo/db/storage/mmap_v1/dur_journalformat.h
index 3c31c2686dd..6392a5600fa 100644
--- a/src/mongo/db/storage/mmap_v1/dur_journalformat.h
+++ b/src/mongo/db/storage/mmap_v1/dur_journalformat.h
@@ -49,11 +49,12 @@ struct JHeader {
     JHeader() {}
     JHeader(std::string fname);

-    char magic
-        [2];  // "j\n". j means journal, then a linefeed, fwiw if you were to run "less" on the file or something...
+    // "j\n". j means journal, then a linefeed, fwiw if you were to run "less" on the file or
+    // something...
+    char magic[2];

-// x4142 is asci--readable if you look at the file with head/less -- thus the starting values were near
-// that. simply incrementing the version # is safe on a fwd basis.
+// x4142 is asci--readable if you look at the file with head/less -- thus the starting values were
+// near that. simply incrementing the version # is safe on a fwd basis.
 #if defined(_NOCOMPRESS)
     enum { CurrentVersion = 0x4148 };
 #else
@@ -62,15 +63,15 @@ struct JHeader {
     unsigned short _version;

     // these are just for diagnostic ease (make header more useful as plain text)
-    char n1;      // '\n'
-    char ts[20];  // ascii timestamp of file generation.  for user reading, not used by code.
-    char n2;      // '\n'
-    char dbpath
-        [128];  // path/filename of this file for human reading and diagnostics.  not used by code.
-    char n3, n4;  // '\n', '\n'
+    char n1;      // '\n'
+    char ts[20];  // ascii timestamp of file generation.  for user reading, not used by code.
+    char n2;      // '\n'
+    char dbpath[128];  // path/filename of this file for human reading and diagnostics.  not used by
+                       // code.
+    char n3, n4;  // '\n', '\n'

-    unsigned long long
-        fileId;  // unique identifier that will be in each JSectHeader.  important as we recycle prealloced files
+    // unique identifier that will be in each JSectHeader.  important as we recycle prealloced files
+    unsigned long long fileId;

     char reserved3[8026];  // 8KB total for the file header
     char txt2[2];          // "\n\n" at the end
@@ -112,7 +113,8 @@ public:
 };

 /** an individual write operation within a group commit section.  Either the entire section should
-    be applied, or nothing.  (We check the md5 for the whole section before doing anything on recovery.)
+    be applied, or nothing.  (We check the md5 for the whole section before doing anything on
+    recovery.)
 */
 struct JEntry {
     enum OpCodes {
diff --git a/src/mongo/db/storage/mmap_v1/dur_journalimpl.h b/src/mongo/db/storage/mmap_v1/dur_journalimpl.h
index 06441653fe3..e51608b69e4 100644
--- a/src/mongo/db/storage/mmap_v1/dur_journalimpl.h
+++ b/src/mongo/db/storage/mmap_v1/dur_journalimpl.h
@@ -108,8 +108,8 @@ private:
     static void preFlush();
     static void postFlush();
     unsigned long long _preFlushTime;
-    unsigned long long
-        _lastFlushTime;  // data < this time is fsynced in the datafiles (unless hard drive controller is caching)
+    // data < this time is fsynced in the datafiles (unless hard drive controller is caching)
+    unsigned long long _lastFlushTime;
     bool _writeToLSNNeeded;
     void updateLSNFile();
 };
diff --git a/src/mongo/db/storage/mmap_v1/dur_preplogbuffer.cpp b/src/mongo/db/storage/mmap_v1/dur_preplogbuffer.cpp
index 3caf910ab37..ea3b4e85148 100644
--- a/src/mongo/db/storage/mmap_v1/dur_preplogbuffer.cpp
+++ b/src/mongo/db/storage/mmap_v1/dur_preplogbuffer.cpp
@@ -69,7 +69,9 @@ static DurableMappedFile* findMMF_inlock(void* ptr, size_t& ofs) {
     DurableMappedFile* f = privateViews.find_inlock(ptr, ofs);
     if (f == 0) {
         error() << "findMMF_inlock failed " << privateViews.numberOfViews_inlock() << endl;
-        printStackTrace();  // we want a stack trace and the assert below didn't print a trace once in the real world - not sure why
+        // we want a stack trace and the assert below didn't print a trace once in the real world -
+        // not sure why
+        printStackTrace();
         stringstream ss;
         ss << "view pointer cannot be resolved " << std::hex << (size_t)ptr;
         journalingFailure(ss.str().c_str());  // asserts, which then abends
diff --git a/src/mongo/db/storage/mmap_v1/dur_recover.cpp b/src/mongo/db/storage/mmap_v1/dur_recover.cpp
index 49daa8c6550..c37fbd23ef7 100644
--- a/src/mongo/db/storage/mmap_v1/dur_recover.cpp
+++ b/src/mongo/db/storage/mmap_v1/dur_recover.cpp
@@ -207,7 +207,8 @@ public:
                     _entries->skip(len + 1);  // skip '\0' too
                     _entries->read(lenOrOpCode);  // read this for the fall through
                 }
-                // fall through as a basic operation always follows jdbcontext, and we don't have anything to return yet
+                // fall through as a basic operation always follows jdbcontext, and we don't have
+                // anything to return yet

             default:
                 // fall through
@@ -520,7 +521,8 @@ bool RecoveryJob::processFile(boost::filesystem::path journalfile) {
             return true;
         }
     } catch (...) {
-        // if something weird like a permissions problem keep going so the massert down below can happen (presumably)
+        // if something weird like a permissions problem keep going so the massert down below can
+        // happen (presumably)
         log() << "recover exception checking filesize" << endl;
     }
diff --git a/src/mongo/db/storage/mmap_v1/dur_stats.h b/src/mongo/db/storage/mmap_v1/dur_stats.h
index 8ec6f8c024f..35cee01d0c6 100644
--- a/src/mongo/db/storage/mmap_v1/dur_stats.h
+++ b/src/mongo/db/storage/mmap_v1/dur_stats.h
@@ -33,8 +33,9 @@ namespace mongo {
 namespace dur {

-/** journaling stats.  the model here is that the commit thread is the only writer, and that reads are
-    uncommon (from a serverStatus command and such).  Thus, there should not be multicore chatter overhead.
+/** journaling stats.  the model here is that the commit thread is the only writer, and that reads
+ * are uncommon (from a serverStatus command and such).  Thus, there should not be multicore chatter
+ * overhead.
 */
 struct Stats {
     struct S {
diff --git a/src/mongo/db/storage/mmap_v1/durable_mapped_file.cpp b/src/mongo/db/storage/mmap_v1/durable_mapped_file.cpp
index d8757c90622..92cc7e84ef9 100644
--- a/src/mongo/db/storage/mmap_v1/durable_mapped_file.cpp
+++ b/src/mongo/db/storage/mmap_v1/durable_mapped_file.cpp
@@ -28,8 +28,10 @@
  * it in the license file.
 */

-/* this module adds some of our layers atop memory mapped files - specifically our handling of private views & such
-   if you don't care about journaling/durability (temp sort files & such) use MemoryMappedFile class, not this.
+/* this module adds some of our layers atop memory mapped files - specifically our handling of
+ * private views & such
+   if you don't care about journaling/durability (temp sort files & such) use MemoryMappedFile
+   class, not this.
 */

 #define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kStorage
@@ -275,9 +277,8 @@ bool DurableMappedFile::finishOpening() {
                         "(look in log for "
                         "more information)");
             }
-            privateViews.add_inlock(
-                _view_private,
-                this);  // note that testIntent builds use this, even though it points to view_write then...
+            // note that testIntent builds use this, even though it points to view_write then...
+            privateViews.add_inlock(_view_private, this);
         } else {
             _view_private = _view_write;
         }
diff --git a/src/mongo/db/storage/mmap_v1/durable_mapped_file.h b/src/mongo/db/storage/mmap_v1/durable_mapped_file.h
index a34fdef6fb2..230dbc31349 100644
--- a/src/mongo/db/storage/mmap_v1/durable_mapped_file.h
+++ b/src/mongo/db/storage/mmap_v1/durable_mapped_file.h
@@ -36,9 +36,9 @@

 namespace mongo {

-/** DurableMappedFile adds some layers atop memory mapped files - specifically our handling of private views & such.
-    if you don't care about journaling/durability (temp sort files & such) use MemoryMappedFile class,
-    not this.
+/** DurableMappedFile adds some layers atop memory mapped files - specifically our handling of
+ * private views & such. if you don't care about journaling/durability (temp sort files & such) use
+ * MemoryMappedFile class, not this.
 */
 class DurableMappedFile : private MemoryMappedFile {
 protected:
@@ -275,6 +275,7 @@ inline void PointerToDurableMappedFile::makeWritable(void* privateView, unsigned
 inline void PointerToDurableMappedFile::makeWritable(void* _p, unsigned len) {}
 #endif

-// allows a pointer into any private view of a DurableMappedFile to be resolved to the DurableMappedFile object
+// allows a pointer into any private view of a DurableMappedFile to be resolved to the
+// DurableMappedFile object
 extern PointerToDurableMappedFile privateViews;
 }
diff --git a/src/mongo/db/storage/mmap_v1/durop.cpp b/src/mongo/db/storage/mmap_v1/durop.cpp
index d7d36bc36e3..754e486f1c2 100644
--- a/src/mongo/db/storage/mmap_v1/durop.cpp
+++ b/src/mongo/db/storage/mmap_v1/durop.cpp
@@ -136,9 +136,9 @@ bool FileCreatedOp::needFilesClosed() {
 }

 void FileCreatedOp::replay() {
-    // i believe the code assumes new files are filled with zeros.  thus we have to recreate the file,
-    // or rewrite at least, even if it were the right length.  perhaps one day we should change that
-    // although easier to avoid defects if we assume it is zeros perhaps.
+    // i believe the code assumes new files are filled with zeros.  thus we have to recreate the
+    // file, or rewrite at least, even if it were the right length.  perhaps one day we should
+    // change that although easier to avoid defects if we assume it is zeros perhaps.
     string full = _p.asFullPath();
     if (boost::filesystem::exists(full)) {
         try {
diff --git a/src/mongo/db/storage/mmap_v1/durop.h b/src/mongo/db/storage/mmap_v1/durop.h
index 337f8177970..b6d80538524 100644
--- a/src/mongo/db/storage/mmap_v1/durop.h
+++ b/src/mongo/db/storage/mmap_v1/durop.h
@@ -44,9 +44,9 @@ namespace dur {

 /** DurOp - Operations we journal that aren't just basic writes.
  *
- *  Basic writes are logged as JEntry's, and indicated in ram temporarily as struct dur::WriteIntent.
- *  We don't make WriteIntent inherit from DurOp to keep it as lean as possible as there will be millions of
- *  them (we don't want a vtable for example there).
+ *  Basic writes are logged as JEntry's, and indicated in ram temporarily as struct
+ *  dur::WriteIntent. We don't make WriteIntent inherit from DurOp to keep it as lean as possible as
+ *  there will be millions of them (we don't want a vtable for example there).
  *
  *  For each op we want to journal, we define a subclass.
  */
diff --git a/src/mongo/db/storage/mmap_v1/extent.h b/src/mongo/db/storage/mmap_v1/extent.h
index 9d6d3935346..16af89fb42b 100644
--- a/src/mongo/db/storage/mmap_v1/extent.h
+++ b/src/mongo/db/storage/mmap_v1/extent.h
@@ -42,7 +42,8 @@ namespace mongo {
 /* extents are datafile regions where all the records within the region
    belong to the same namespace.

-(11:12:35 AM) dm10gen: when the extent is allocated, all its empty space is stuck into one big DeletedRecord
+(11:12:35 AM) dm10gen: when the extent is allocated, all its empty space is stuck into one big
+    DeletedRecord
 (11:12:55 AM) dm10gen: and that is placed on the free list
 */
 #pragma pack(1)
diff --git a/src/mongo/db/storage/mmap_v1/journal_latency_test_cmd.cpp b/src/mongo/db/storage/mmap_v1/journal_latency_test_cmd.cpp
index c34cf60df5f..2b1cb3dfb15 100644
--- a/src/mongo/db/storage/mmap_v1/journal_latency_test_cmd.cpp
+++ b/src/mongo/db/storage/mmap_v1/journal_latency_test_cmd.cpp
@@ -122,7 +122,8 @@ public:
                     sleepmillis(4);
                 }
                 long long y = t2.micros() - 4 * N * 1000;
-                // not really trusting the timer granularity on all platforms so whichever is higher of x and y
+                // not really trusting the timer granularity on all platforms so whichever is higher
+                // of x and y
                 bb[pass].append("8KBWithPauses", max(x, y) / (N * 1000.0));
             }
             {
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp b/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp
index aaa28e1f1fc..41cf7b70a91 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp
@@ -443,8 +443,8 @@ DiskLoc MmapV1ExtentManager::_allocFromFreeList(OperationContext* txn,
                 break;
             }
             if (t.seconds() >= 2) {
-                // have spent lots of time in write lock, and we are in [low,high], so close enough
-                // could come into play if extent freelist is very long
+                // have spent lots of time in write lock, and we are in [low,high], so close
+                // enough could come into play if extent freelist is very long
                 break;
             }
         } else {
diff --git a/src/mongo/db/storage/mmap_v1/record.h b/src/mongo/db/storage/mmap_v1/record.h
index 131b1144ff6..de3ffaef2cb 100644
--- a/src/mongo/db/storage/mmap_v1/record.h
+++ b/src/mongo/db/storage/mmap_v1/record.h
@@ -42,13 +42,16 @@ class DeletedRecord;

 /* Record is a record in a datafile.  DeletedRecord is similar but for deleted space.

 *11:03:20 AM) dm10gen: regarding extentOfs...
-(11:03:42 AM) dm10gen: an extent is a continugous disk area, which contains many Records and DeleteRecords
+(11:03:42 AM) dm10gen: an extent is a continugous disk area, which contains many Records and
+    DeleteRecords
 (11:03:56 AM) dm10gen: a DiskLoc has two pieces, the fileno and ofs.  (64 bit total)
-(11:04:16 AM) dm10gen: to keep the headesr small, instead of storing a 64 bit ptr to the full extent address, we keep just the offset
+(11:04:16 AM) dm10gen: to keep the headesr small, instead of storing a 64 bit ptr to the full
+    extent address, we keep just the offset
 (11:04:29 AM) dm10gen: we can do this as we know the record's address, and it has the same fileNo
 (11:04:33 AM) dm10gen: see class DiskLoc for more info
 (11:04:43 AM) dm10gen: so that is how Record::myExtent() works
-(11:04:53 AM) dm10gen: on an alloc(), when we build a new Record, we must populate its extentOfs then
+(11:04:53 AM) dm10gen: on an alloc(), when we build a new Record, we must populate its extentOfs
+    then
 */
 #pragma pack(1)
 class Record {
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp
index 9b1817707c9..a0b9e770ead 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp
@@ -791,7 +791,8 @@ Status RecordStoreV1Base::validate(OperationContext* txn,

                 if (loc.questionable()) {
                     if (isCapped() && !loc.isValid() && i == 1) {
-                        /* the constructor for NamespaceDetails intentionally sets deletedList[1] to invalid
+                        /* the constructor for NamespaceDetails intentionally sets
+                         * deletedList[1] to invalid
                            see comments in namespace.h
                         */
                         break;