summaryrefslogtreecommitdiff
path: root/src/mongo/db
diff options
context:
space:
mode:
authorMark Benvenuto <mark.benvenuto@mongodb.com>2015-06-19 10:57:36 -0400
committerMark Benvenuto <mark.benvenuto@mongodb.com>2015-06-20 10:56:04 -0400
commit6f6fa5a63d482b0dc117eb2ac21cf096deb5a6f3 (patch)
treeb76c2a4dfc7f45eb25dd62cb3ffe89ea448d9e0e /src/mongo/db
parent9c2ed42daa8fbbef4a919c21ec564e2db55e8d60 (diff)
downloadmongo-6f6fa5a63d482b0dc117eb2ac21cf096deb5a6f3.tar.gz
SERVER-18978: Clang-Format - Fix comment word wrapping indentation
Diffstat (limited to 'src/mongo/db')
-rw-r--r--src/mongo/db/auth/authorization_session.h3
-rw-r--r--src/mongo/db/auth/role_graph_builtin_roles.cpp4
-rw-r--r--src/mongo/db/commands/index_filter_commands_test.cpp3
-rw-r--r--src/mongo/db/commands/mr.cpp3
-rw-r--r--src/mongo/db/dbhelpers.h4
-rw-r--r--src/mongo/db/dbmessage.h3
-rw-r--r--src/mongo/db/geo/s2.h4
-rw-r--r--src/mongo/db/matcher/expression_leaf.cpp3
-rw-r--r--src/mongo/db/mongod_options.cpp4
-rw-r--r--src/mongo/db/namespace_string.h3
-rw-r--r--src/mongo/db/prefetch.cpp10
-rw-r--r--src/mongo/db/query/get_executor.cpp4
-rw-r--r--src/mongo/db/query/planner_access.h4
-rw-r--r--src/mongo/db/query/query_planner_geo_test.cpp9
-rw-r--r--src/mongo/db/repl/bgsync.cpp3
-rw-r--r--src/mongo/db/repl/master_slave.cpp24
-rw-r--r--src/mongo/db/repl/master_slave.h12
-rw-r--r--src/mongo/db/repl/oplog.cpp4
-rw-r--r--src/mongo/db/repl/repl_settings.h5
-rw-r--r--src/mongo/db/repl/replication_info.cpp3
-rw-r--r--src/mongo/db/storage/mmap_v1/aligned_builder.cpp4
-rw-r--r--src/mongo/db/storage/mmap_v1/btree/btree_logic_test.cpp6
-rw-r--r--src/mongo/db/storage/mmap_v1/btree/key.cpp6
-rw-r--r--src/mongo/db/storage/mmap_v1/btree/key.h9
-rw-r--r--src/mongo/db/storage/mmap_v1/catalog/namespace.h10
-rw-r--r--src/mongo/db/storage/mmap_v1/catalog/namespace_details.h8
-rw-r--r--src/mongo/db/storage/mmap_v1/data_file.h6
-rw-r--r--src/mongo/db/storage/mmap_v1/diskloc.h21
-rw-r--r--src/mongo/db/storage/mmap_v1/dur.cpp24
-rw-r--r--src/mongo/db/storage/mmap_v1/dur_journalformat.h28
-rw-r--r--src/mongo/db/storage/mmap_v1/dur_journalimpl.h4
-rw-r--r--src/mongo/db/storage/mmap_v1/dur_preplogbuffer.cpp5
-rw-r--r--src/mongo/db/storage/mmap_v1/dur_recover.cpp6
-rw-r--r--src/mongo/db/storage/mmap_v1/dur_stats.h8
-rw-r--r--src/mongo/db/storage/mmap_v1/durable_mapped_file.cpp13
-rw-r--r--src/mongo/db/storage/mmap_v1/durable_mapped_file.h12
-rw-r--r--src/mongo/db/storage/mmap_v1/durop.cpp6
-rw-r--r--src/mongo/db/storage/mmap_v1/durop.h6
-rw-r--r--src/mongo/db/storage/mmap_v1/extent.h3
-rw-r--r--src/mongo/db/storage/mmap_v1/extent_manager.h6
-rw-r--r--src/mongo/db/storage/mmap_v1/file_allocator.cpp3
-rw-r--r--src/mongo/db/storage/mmap_v1/journal_latency_test_cmd.cpp3
-rw-r--r--src/mongo/db/storage/mmap_v1/logfile.h3
-rw-r--r--src/mongo/db/storage/mmap_v1/mmap.h6
-rw-r--r--src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp4
-rw-r--r--src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.h6
-rw-r--r--src/mongo/db/storage/mmap_v1/mmap_windows.cpp3
-rw-r--r--src/mongo/db/storage/mmap_v1/record.h9
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp4
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_simple.cpp4
50 files changed, 203 insertions, 145 deletions
diff --git a/src/mongo/db/auth/authorization_session.h b/src/mongo/db/auth/authorization_session.h
index d6fe06e11fa..16c10334011 100644
--- a/src/mongo/db/auth/authorization_session.h
+++ b/src/mongo/db/auth/authorization_session.h
@@ -185,7 +185,8 @@ public:
// ResourcePattern::forDatabaseName(role.getDB()), ActionType::grantAnyRole)
bool isAuthorizedToRevokeRole(const RoleName& role);
- // Utility function for isAuthorizedToChangeOwnPasswordAsUser and isAuthorizedToChangeOwnCustomDataAsUser
+ // Utility function for isAuthorizedToChangeOwnPasswordAsUser and
+ // isAuthorizedToChangeOwnCustomDataAsUser
bool isAuthorizedToChangeAsUser(const UserName& userName, ActionType actionType);
// Returns true if the current session is authenticated as the given user and that user
diff --git a/src/mongo/db/auth/role_graph_builtin_roles.cpp b/src/mongo/db/auth/role_graph_builtin_roles.cpp
index 6b8a1762bce..213aa690b4a 100644
--- a/src/mongo/db/auth/role_graph_builtin_roles.cpp
+++ b/src/mongo/db/auth/role_graph_builtin_roles.cpp
@@ -138,8 +138,8 @@ MONGO_INITIALIZER(AuthorizationBuiltinRoles)(InitializerContext* context) {
<< ActionType::createCollection // read_write gets this also
<< ActionType::dbStats // clusterMonitor gets this also
<< ActionType::dropCollection
- << ActionType::
- dropDatabase // clusterAdmin gets this also TODO(spencer): should readWriteAnyDatabase?
+ << ActionType::dropDatabase // clusterAdmin gets this also TODO(spencer): should
+ // readWriteAnyDatabase?
<< ActionType::dropIndex << ActionType::createIndex << ActionType::indexStats
<< ActionType::enableProfiler << ActionType::listCollections << ActionType::listIndexes
<< ActionType::planCacheIndexFilter << ActionType::planCacheRead
diff --git a/src/mongo/db/commands/index_filter_commands_test.cpp b/src/mongo/db/commands/index_filter_commands_test.cpp
index b23b3d34ed8..ba6f6c87e6c 100644
--- a/src/mongo/db/commands/index_filter_commands_test.cpp
+++ b/src/mongo/db/commands/index_filter_commands_test.cpp
@@ -157,7 +157,8 @@ bool planCacheContains(const PlanCache& planCache,
PlanCacheEntry* entry = *i;
// Canonicalizing query shape in cache entry to get cache key.
- // Alternatively, we could add key to PlanCacheEntry but that would be used in one place only.
+ // Alternatively, we could add key to PlanCacheEntry but that would be used in one place
+ // only.
ASSERT_OK(
CanonicalQuery::canonicalize(ns, entry->query, entry->sort, entry->projection, &cqRaw));
unique_ptr<CanonicalQuery> currentQuery(cqRaw);
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index 59eca8ae4c4..f32ebe3b8d8 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -781,7 +781,8 @@ void State::init() {
_scope->invoke(init, 0, 0, 0, true);
// js function to run reduce on all keys
- // redfunc = _scope->createFunction("for (var key in hashmap) { print('Key is ' + key); list = hashmap[key]; ret = reduce(key, list); print('Value is ' + ret); };");
+ // redfunc = _scope->createFunction("for (var key in hashmap) { print('Key is ' + key);
+ // list = hashmap[key]; ret = reduce(key, list); print('Value is ' + ret); };");
_reduceAll = _scope->createFunction(
"var map = _mrMap;"
"var list, ret;"
diff --git a/src/mongo/db/dbhelpers.h b/src/mongo/db/dbhelpers.h
index 146943fcfc3..a3e5f735afa 100644
--- a/src/mongo/db/dbhelpers.h
+++ b/src/mongo/db/dbhelpers.h
@@ -69,8 +69,8 @@ struct Helpers {
/* fetch a single object from collection ns that matches query.
set your db SavedContext first.
- @param query - the query to perform. note this is the low level portion of query so "orderby : ..."
- won't work.
+ @param query - the query to perform. note this is the low level portion of query so
+ "orderby : ..." won't work.
@param requireIndex if true, assert if no index for the query. a way to guard against
writing a slow query.
diff --git a/src/mongo/db/dbmessage.h b/src/mongo/db/dbmessage.h
index 10f57a17e09..795bdf75b90 100644
--- a/src/mongo/db/dbmessage.h
+++ b/src/mongo/db/dbmessage.h
@@ -74,7 +74,8 @@ namespace mongo {
std::string collection;
int nToSkip;
int nToReturn; // how many you want back as the beginning of the cursor data (0=no limit)
- // greater than zero is simply a hint on how many objects to send back per "cursor batch".
+ // greater than zero is simply a hint on how many objects to send back per
+ // "cursor batch".
// a negative number indicates a hard limit.
JSObject query;
[JSObject fieldsToReturn]
diff --git a/src/mongo/db/geo/s2.h b/src/mongo/db/geo/s2.h
index 7a3a1c6a840..293fe5185e4 100644
--- a/src/mongo/db/geo/s2.h
+++ b/src/mongo/db/geo/s2.h
@@ -29,8 +29,8 @@
#pragma once
/*
- * This file's purpose is to confine the suppression of the Clang warning for mismatched-tags (struct vs class)
- * in only the s2.h file
+ * This file's purpose is to confine the suppression of the Clang warning for
+ * mismatched-tags (struct vs class) in only the s2.h file
*/
#ifdef __clang__
diff --git a/src/mongo/db/matcher/expression_leaf.cpp b/src/mongo/db/matcher/expression_leaf.cpp
index 619fc64133d..8284ca8f3ab 100644
--- a/src/mongo/db/matcher/expression_leaf.cpp
+++ b/src/mongo/db/matcher/expression_leaf.cpp
@@ -275,7 +275,8 @@ Status RegexMatchExpression::init(StringData path, StringData regex, StringData
}
bool RegexMatchExpression::matchesSingleElement(const BSONElement& e) const {
- // log() << "RegexMatchExpression::matchesSingleElement _regex: " << _regex << " e: " << e << std::endl;
+ // log() << "RegexMatchExpression::matchesSingleElement _regex: " << _regex << " e: " << e <<
+ // std::endl;
switch (e.type()) {
case String:
case Symbol:
diff --git a/src/mongo/db/mongod_options.cpp b/src/mongo/db/mongod_options.cpp
index 27e929447a1..e3997f7d4a2 100644
--- a/src/mongo/db/mongod_options.cpp
+++ b/src/mongo/db/mongod_options.cpp
@@ -788,8 +788,8 @@ Status canonicalizeMongodOptions(moe::Environment* params) {
}
}
- // "storage.mmapv1.preallocDataFiles" comes from the config file, so override it if "noprealloc" is
- // set since that comes from the command line.
+ // "storage.mmapv1.preallocDataFiles" comes from the config file, so override it if "noprealloc"
+ // is set since that comes from the command line.
if (params->count("noprealloc")) {
Status ret = params->set("storage.mmapv1.preallocDataFiles",
moe::Value(!(*params)["noprealloc"].as<bool>()));
diff --git a/src/mongo/db/namespace_string.h b/src/mongo/db/namespace_string.h
index cd1067b7b45..881b9e1d468 100644
--- a/src/mongo/db/namespace_string.h
+++ b/src/mongo/db/namespace_string.h
@@ -149,7 +149,8 @@ public:
NamespaceString getTargetNSForListIndexesGetMore() const;
/**
- * @return true if the namespace is valid. Special namespaces for internal use are considered as valid.
+ * @return true if the namespace is valid. Special namespaces for internal use are considered as
+ * valid.
*/
bool isValid() const {
return validDBName(db()) && !coll().empty();
diff --git a/src/mongo/db/prefetch.cpp b/src/mongo/db/prefetch.cpp
index 77a44c4b834..732e230741e 100644
--- a/src/mongo/db/prefetch.cpp
+++ b/src/mongo/db/prefetch.cpp
@@ -53,8 +53,8 @@ using std::string;
namespace repl {
namespace {
-// todo / idea: the prefetcher, when it fetches _id, on an upsert, will see if the record exists. if it does not,
-// at write time, we can just do an insert, which will be faster.
+// todo / idea: the prefetcher, when it fetches _id, on an upsert, will see if the record exists. if
+// it does not, at write time, we can just do an insert, which will be faster.
// The count (of batches) and time spent fetching pages before application
// -- meaning depends on the prefetch behavior: all, _id index, none, etc.)
@@ -69,9 +69,9 @@ void prefetchIndexPages(OperationContext* txn,
Collection* collection,
const BackgroundSync::IndexPrefetchConfig& prefetchConfig,
const BSONObj& obj) {
- // do we want prefetchConfig to be (1) as-is, (2) for update ops only, or (3) configured per op type?
- // One might want PREFETCH_NONE for updates, but it's more rare that it is a bad idea for inserts.
- // #3 (per op), a big issue would be "too many knobs".
+ // do we want prefetchConfig to be (1) as-is, (2) for update ops only, or (3) configured per op
+ // type? One might want PREFETCH_NONE for updates, but it's more rare that it is a bad idea for
+ // inserts. #3 (per op), a big issue would be "too many knobs".
switch (prefetchConfig) {
case BackgroundSync::PREFETCH_NONE:
return;
diff --git a/src/mongo/db/query/get_executor.cpp b/src/mongo/db/query/get_executor.cpp
index 189910bbae1..1472d6693ae 100644
--- a/src/mongo/db/query/get_executor.cpp
+++ b/src/mongo/db/query/get_executor.cpp
@@ -1132,8 +1132,8 @@ std::string getProjectedDottedField(const std::string& field, bool* isIDOut) {
// Generate prefix of field up to (but not including) array index.
std::vector<std::string> prefixStrings(res);
prefixStrings.resize(i);
- // Reset projectedField. Instead of overwriting, joinStringDelim() appends joined string
- // to the end of projectedField.
+ // Reset projectedField. Instead of overwriting, joinStringDelim() appends joined
+ // string to the end of projectedField.
std::string projectedField;
mongo::joinStringDelim(prefixStrings, &projectedField, '.');
return projectedField;
diff --git a/src/mongo/db/query/planner_access.h b/src/mongo/db/query/planner_access.h
index 55a05ff5161..8ab6bf9d58a 100644
--- a/src/mongo/db/query/planner_access.h
+++ b/src/mongo/db/query/planner_access.h
@@ -218,8 +218,8 @@ public:
// a filter on the entire tree.
// 2. No fetches performed. There will be a final fetch by the caller of buildIndexedDataAccess
// who set the value of inArrayOperator to true.
- // 3. No compound indices are used and no bounds are combined. These are incorrect in the context
- // of these operators.
+ // 3. No compound indices are used and no bounds are combined. These are
+ // incorrect in the context of these operators.
//
/**
diff --git a/src/mongo/db/query/query_planner_geo_test.cpp b/src/mongo/db/query/query_planner_geo_test.cpp
index 11fc175d2ac..7129f01af73 100644
--- a/src/mongo/db/query/query_planner_geo_test.cpp
+++ b/src/mongo/db/query/query_planner_geo_test.cpp
@@ -601,15 +601,16 @@ TEST_F(QueryPlannerTest, CompoundGeoNoGeoPredicateMultikey) {
/*
TEST_F(QueryPlannerTest, SortOnGeoQuery) {
addIndex(BSON("timestamp" << -1 << "position" << "2dsphere"));
- BSONObj query = fromjson("{position: {$geoWithin: {$geometry: {type: \"Polygon\", coordinates: [[[1, 1], [1, 90], [180, 90], [180, 1], [1, 1]]]}}}}");
- BSONObj sort = fromjson("{timestamp: -1}");
+ BSONObj query = fromjson("{position: {$geoWithin: {$geometry: {type: \"Polygon\", coordinates:
+ [[[1, 1], [1, 90], [180, 90], [180, 1], [1, 1]]]}}}}"); BSONObj sort = fromjson("{timestamp:
+ -1}");
runQuerySortProj(query, sort, BSONObj());
ASSERT_EQUALS(getNumSolutions(), 2U);
assertSolutionExists("{sort: {pattern: {timestamp: -1}, limit: 0, "
"node: {cscan: {dir: 1}}}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {timestamp: -1, position: '2dsphere'}}}}}");
-}
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {timestamp: -1, position:
+ '2dsphere'}}}}}"); }
TEST_F(QueryPlannerTest, SortOnGeoQueryMultikey) {
// true means multikey
diff --git a/src/mongo/db/repl/bgsync.cpp b/src/mongo/db/repl/bgsync.cpp
index 5c84a724b94..81184fe42ad 100644
--- a/src/mongo/db/repl/bgsync.cpp
+++ b/src/mongo/db/repl/bgsync.cpp
@@ -421,7 +421,8 @@ bool BackgroundSync::_rollbackIfNeeded(OperationContext* txn, OplogReader& r) {
return true;
}
- /* we're not ahead? maybe our new query got fresher data. best to come back and try again */
+ /* we're not ahead? maybe our new query got fresher data. best to come back and try
+ again */
log() << "syncTail condition 1";
sleepsecs(1);
} catch (DBException& e) {
diff --git a/src/mongo/db/repl/master_slave.cpp b/src/mongo/db/repl/master_slave.cpp
index 1e1bd428d39..074a7fcbe77 100644
--- a/src/mongo/db/repl/master_slave.cpp
+++ b/src/mongo/db/repl/master_slave.cpp
@@ -28,10 +28,12 @@
/* Collections we use:
- local.sources - indicates what sources we pull from as a "slave", and the last update of each
+ local.sources - indicates what sources we pull from as a "slave", and the last update of
+ each
local.oplog.$main - our op log as "master"
local.dbinfo.<dbname> - no longer used???
- local.pair.startup - [deprecated] can contain a special value indicating for a pair that we have the master copy.
+ local.pair.startup - [deprecated] can contain a special value indicating for a pair that we
+ have the master copy.
used when replacing other half of the pair which has permanently failed.
local.pair.sync - [deprecated] { initialsynccomplete: 1 }
*/
@@ -736,7 +738,8 @@ void ReplSource::_sync_pullOpLog_applyOperation(OperationContext* txn,
unique_ptr<Lock::GlobalWrite> lk(alreadyLocked ? 0 : new Lock::GlobalWrite(txn->lockState()));
if (replAllDead) {
- // hmmm why is this check here and not at top of this function? does it get set between top and here?
+ // hmmm why is this check here and not at top of this function? does it get set between top
+ // and here?
log() << "replAllDead, throwing SyncException: " << replAllDead << endl;
throw SyncException();
}
@@ -993,7 +996,8 @@ int ReplSource::_sync_pullOpLog(OperationContext* txn, int& nApplied) {
verify(syncedTo < nextOpTime);
throw SyncException();
} else {
- /* t == syncedTo, so the first op was applied previously or it is the first op of initial query and need not be applied. */
+ /* t == syncedTo, so the first op was applied previously or it is the first op of
+ * initial query and need not be applied. */
}
}
@@ -1115,7 +1119,8 @@ int ReplSource::sync(OperationContext* txn, int& nApplied) {
}
nClonedThisPass = 0;
- // FIXME Handle cases where this db isn't on default port, or default port is spec'd in hostName.
+ // FIXME Handle cases where this db isn't on default port, or default port is spec'd in
+ // hostName.
if ((string("localhost") == hostName || string("127.0.0.1") == hostName) &&
serverGlobalParams.port == ServerGlobalParams::DefaultDBPort) {
log() << "can't sync from self (localhost). sources configuration may be wrong." << endl;
@@ -1223,9 +1228,9 @@ static void replMain(OperationContext* txn) {
break;
}
}
- verify(
- syncing ==
- 0); // i.e., there is only one sync thread running. we will want to change/fix this.
+
+ // i.e., there is only one sync thread running. we will want to change/fix this.
+ verify(syncing == 0);
syncing++;
}
@@ -1398,7 +1403,8 @@ void pretouchN(vector<BSONObj>& v, unsigned a, unsigned b) {
void pretouchOperation(OperationContext* txn, const BSONObj& op) {
if (txn->lockState()->isWriteLocked()) {
- return; // no point pretouching if write locked. not sure if this will ever fire, but just in case.
+ // no point pretouching if write locked. not sure if this will ever fire, but just in case.
+ return;
}
const char* which = "o";
diff --git a/src/mongo/db/repl/master_slave.h b/src/mongo/db/repl/master_slave.h
index d290be23de3..e26fd3c0f56 100644
--- a/src/mongo/db/repl/master_slave.h
+++ b/src/mongo/db/repl/master_slave.h
@@ -34,7 +34,8 @@
/* replication data overview
at the slave:
- local.sources { host: ..., source: ..., only: ..., syncedTo: ..., localLogTs: ..., dbsNextPass: { ... }, incompleteCloneDbs: { ... } }
+ local.sources { host: ..., source: ..., only: ..., syncedTo: ..., localLogTs: ...,
+ dbsNextPass: { ... }, incompleteCloneDbs: { ... } }
at the master:
local.oplog.$<source>
@@ -68,7 +69,8 @@ public:
Can be a group of things to replicate for several databases.
- { host: ..., source: ..., only: ..., syncedTo: ..., dbsNextPass: { ... }, incompleteCloneDbs: { ... } }
+ { host: ..., source: ..., only: ..., syncedTo: ..., dbsNextPass: { ... },
+ incompleteCloneDbs: { ... } }
'source' defaults to 'main'; support for multiple source names is
not done (always use main for now).
@@ -135,8 +137,10 @@ public:
std::string sourceName() const {
return _sourceName.empty() ? "main" : _sourceName;
}
- std::string
- only; // only a certain db. note that in the sources collection, this may not be changed once you start replicating.
+
+ // only a certain db. note that in the sources collection, this may not be changed once you
+ // start replicating.
+ std::string only;
/* the last time point we have already synced up to (in the remote/master's oplog). */
Timestamp syncedTo;
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index 2afa1b53c52..26ab199fae2 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -687,8 +687,8 @@ Status applyOperation_inlock(OperationContext* txn,
return Status(ErrorCodes::OperationFailed, msg);
}
- // Otherwise, it's present; zero objects were updated because of additional specifiers
- // in the query for idempotence
+ // Otherwise, it's present; zero objects were updated because of additional
+ // specifiers in the query for idempotence
} else {
// this could happen benignly on an oplog duplicate replay of an upsert
// (because we are idempotent),
diff --git a/src/mongo/db/repl/repl_settings.h b/src/mongo/db/repl/repl_settings.h
index 5c1e6032acc..1ce5fa6b4dc 100644
--- a/src/mongo/db/repl/repl_settings.h
+++ b/src/mongo/db/repl/repl_settings.h
@@ -51,7 +51,10 @@ class ReplSettings {
public:
SlaveTypes slave;
- /** true means we are master and doing replication. if we are not writing to oplog, this won't be true. */
+ /**
+ * true means we are master and doing replication. if we are not writing to oplog, this won't
+ * be true.
+ */
bool master;
bool fastsync;
diff --git a/src/mongo/db/repl/replication_info.cpp b/src/mongo/db/repl/replication_info.cpp
index 57b403aa434..f5d0086c1ef 100644
--- a/src/mongo/db/repl/replication_info.cpp
+++ b/src/mongo/db/repl/replication_info.cpp
@@ -113,7 +113,8 @@ void appendReplicationInfo(OperationContext* txn, BSONObjBuilder& result, int le
if (level > 1) {
wassert(!txn->lockState()->isLocked());
- // note: there is no so-style timeout on this connection; perhaps we should have one.
+ // note: there is no so-style timeout on this connection; perhaps we should have
+ // one.
ScopedDbConnection conn(s["host"].valuestr());
DBClientConnection* cliConn = dynamic_cast<DBClientConnection*>(&conn.conn());
diff --git a/src/mongo/db/storage/mmap_v1/aligned_builder.cpp b/src/mongo/db/storage/mmap_v1/aligned_builder.cpp
index 8742f25e285..bee3fb4f86a 100644
--- a/src/mongo/db/storage/mmap_v1/aligned_builder.cpp
+++ b/src/mongo/db/storage/mmap_v1/aligned_builder.cpp
@@ -134,8 +134,8 @@ void AlignedBuilder::_malloc(unsigned sz) {
_p._allocationAddress = p;
_p._data = (char*)p;
#elif defined(__linux__)
- // in theory #ifdef _POSIX_VERSION should work, but it doesn't on OS X 10.4, and needs to be tested on solaris.
- // so for now, linux only for this.
+ // in theory #ifdef _POSIX_VERSION should work, but it doesn't on OS X 10.4, and needs to be
+ // tested on solaris. so for now, linux only for this.
void* p = 0;
int res = posix_memalign(&p, Alignment, sz);
massert(13524, "out of memory AlignedBuilder", res == 0);
diff --git a/src/mongo/db/storage/mmap_v1/btree/btree_logic_test.cpp b/src/mongo/db/storage/mmap_v1/btree/btree_logic_test.cpp
index b4e42196c99..da0d26a5cbf 100644
--- a/src/mongo/db/storage/mmap_v1/btree/btree_logic_test.cpp
+++ b/src/mongo/db/storage/mmap_v1/btree/btree_logic_test.cpp
@@ -2382,9 +2382,11 @@ public:
}
// too much work to try to make this happen through inserts and deletes
// we are intentionally manipulating the btree bucket directly here
- BtreeBucket::Loc* L = const_cast< BtreeBucket::Loc* >( &bt()->keyNode( 1 ).prevChildBucket );
+ BtreeBucket::Loc* L = const_cast< BtreeBucket::Loc* >(
+ &bt()->keyNode( 1 ).prevChildBucket );
writing(L)->Null();
- writingInt( const_cast< BtreeBucket::Loc& >( bt()->keyNode( 1 ).recordLoc ).GETOFS() ) |= 1; // make unused
+ writingInt( const_cast< BtreeBucket::Loc& >(
+ bt()->keyNode( 1 ).recordLoc ).GETOFS() ) |= 1; // make unused
BSONObj k = BSON( "a" << toInsert );
Base::insert( k );
}
diff --git a/src/mongo/db/storage/mmap_v1/btree/key.cpp b/src/mongo/db/storage/mmap_v1/btree/key.cpp
index cbb89d8fab9..2e78e5e008f 100644
--- a/src/mongo/db/storage/mmap_v1/btree/key.cpp
+++ b/src/mongo/db/storage/mmap_v1/btree/key.cpp
@@ -201,7 +201,8 @@ int KeyBson::woCompare(const KeyBson& r, const Ordering& o) const {
return oldCompare(_o, r._o, o);
}
-// woEqual could be made faster than woCompare but this is for backward compatibility so not worth a big effort
+// woEqual could be made faster than woCompare but this is for backward compatibility so not worth a
+// big effort
bool KeyBson::woEqual(const KeyBson& r) const {
return oldCompare(_o, r._o, nullOrdering) == 0;
}
@@ -499,7 +500,8 @@ static int compare(const unsigned char*& l, const unsigned char*& r) {
int llen = binDataCodeToLength(L);
int diff = L - R; // checks length and subtype simultaneously
if (diff) {
- // unfortunately nibbles are backwards to do subtype and len in one check (could bit swap...)
+ // unfortunately nibbles are backwards to do subtype and len in one check (could bit
+ // swap...)
int rlen = binDataCodeToLength(R);
if (llen != rlen)
return llen - rlen;
diff --git a/src/mongo/db/storage/mmap_v1/btree/key.h b/src/mongo/db/storage/mmap_v1/btree/key.h
index 4787d83281a..906ddcc621b 100644
--- a/src/mongo/db/storage/mmap_v1/btree/key.h
+++ b/src/mongo/db/storage/mmap_v1/btree/key.h
@@ -81,10 +81,11 @@ class KeyV1Owned;
// corresponding to BtreeData_V1
class KeyV1 {
- void operator=(
- const KeyV1&); // disallowed just to make people be careful as we don't own the buffer
- KeyV1(
- const KeyV1Owned&); // disallowed as this is not a great idea as KeyV1Owned likely will go out of scope
+ // disallowed just to make people be careful as we don't own the buffer
+ void operator=(const KeyV1&);
+ // disallowed as this is not a great idea as KeyV1Owned likely will go out of scope
+ KeyV1(const KeyV1Owned&);
+
public:
KeyV1() {
_keyData = 0;
diff --git a/src/mongo/db/storage/mmap_v1/catalog/namespace.h b/src/mongo/db/storage/mmap_v1/catalog/namespace.h
index f93112de47f..0e382beade2 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/namespace.h
+++ b/src/mongo/db/storage/mmap_v1/catalog/namespace.h
@@ -84,12 +84,12 @@ public:
return buf;
}
- /* NamespaceDetails::Extra was added after fact to allow chaining of data blocks to support more than 10 indexes
- (more than 10 IndexDetails). It's a bit hacky because of this late addition with backward
- file support. */
+ /* NamespaceDetails::Extra was added after fact to allow chaining of data blocks to support more
+ * than 10 indexes (more than 10 IndexDetails). It's a bit hacky because of this late addition
+ * with backward file support. */
std::string extraName(int i) const;
- bool isExtra()
- const; /* ends with $extr... -- when true an extra block not a normal NamespaceDetails block */
+ /* ends with $extr... -- when true an extra block not a normal NamespaceDetails block */
+ bool isExtra() const;
enum MaxNsLenValue {
// Maximum possible length of name any namespace, including special ones like $extra.
diff --git a/src/mongo/db/storage/mmap_v1/catalog/namespace_details.h b/src/mongo/db/storage/mmap_v1/catalog/namespace_details.h
index 5002bf267c7..a6604e1fb04 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/namespace_details.h
+++ b/src/mongo/db/storage/mmap_v1/catalog/namespace_details.h
@@ -97,8 +97,8 @@ public:
DiskLoc capExtent; // the "current" extent we're writing too for a capped collection
DiskLoc capFirstNewRecord;
- unsigned short
- _dataFileVersion; // NamespaceDetails version. So we can do backward compatibility in the future. See filever.h
+ // NamespaceDetails version. So we can do backward compatibility in the future. See filever.h
+ unsigned short _dataFileVersion;
unsigned short _indexFileVersion;
unsigned long long multiKeyIndexBits;
@@ -115,8 +115,8 @@ public:
DiskLoc deletedListLarge[LargeBuckets];
// Think carefully before using this. We need at least 8 bytes reserved to leave room for a
- // DiskLoc pointing to more data (eg in a dummy MmapV1RecordHeader or Extent). There is still _reservedA
- // above, but these are the final two reserved 8-byte regions.
+ // DiskLoc pointing to more data (eg in a dummy MmapV1RecordHeader or Extent). There is still
+ // _reservedA above, but these are the final two reserved 8-byte regions.
char _reserved[8];
/*-------- end data 496 bytes */
public:
diff --git a/src/mongo/db/storage/mmap_v1/data_file.h b/src/mongo/db/storage/mmap_v1/data_file.h
index ed6e08e7931..a9252188d43 100644
--- a/src/mongo/db/storage/mmap_v1/data_file.h
+++ b/src/mongo/db/storage/mmap_v1/data_file.h
@@ -126,8 +126,10 @@ class DataFileHeader {
public:
DataFileVersion version;
int fileLength;
- DiskLoc
- unused; /* unused is the portion of the file that doesn't belong to any allocated extents. -1 = no more */
+ /**
+ * unused is the portion of the file that doesn't belong to any allocated extents. -1 = no more
+ */
+ DiskLoc unused;
int unusedLength;
DiskLoc freeListStart;
DiskLoc freeListEnd;
diff --git a/src/mongo/db/storage/mmap_v1/diskloc.h b/src/mongo/db/storage/mmap_v1/diskloc.h
index 662daf074d5..5a675b40b92 100644
--- a/src/mongo/db/storage/mmap_v1/diskloc.h
+++ b/src/mongo/db/storage/mmap_v1/diskloc.h
@@ -52,12 +52,15 @@ class BtreeBucket;
(such as adding a virtual function)
*/
class DiskLoc {
- int _a; // this will be volume, file #, etc. but is a logical value could be anything depending on storage engine
+ // this will be volume, file #, etc. but is a logical value could be anything depending on
+ // storage engine
+ int _a;
int ofs;
public:
enum SentinelValues {
- /* note NullOfs is different. todo clean up. see refs to NullOfs in code - use is valid but outside DiskLoc context so confusing as-is. */
+ /* note NullOfs is different. todo clean up. see refs to NullOfs in code - use is valid but
+ * outside DiskLoc context so confusing as-is. */
NullOfs = -1,
// Caps the number of files that may be allocated in a database, allowing about 32TB of
@@ -74,15 +77,16 @@ public:
Null();
}
- // Minimum allowed DiskLoc. No MmapV1RecordHeader may begin at this location because file and extent
- // headers must precede Records in a file.
+ // Minimum allowed DiskLoc. No MmapV1RecordHeader may begin at this location because file and
+ // extent headers must precede Records in a file.
static DiskLoc min() {
return DiskLoc(0, 0);
}
// Maximum allowed DiskLoc.
- // No MmapV1RecordHeader may begin at this location because the minimum size of a MmapV1RecordHeader is larger than
- // one byte. Also, the last bit is not able to be used because mmapv1 uses that for "used".
+ // No MmapV1RecordHeader may begin at this location because the minimum size of a
+ // MmapV1RecordHeader is larger than one byte. Also, the last bit is not able to be used
+ // because mmapv1 uses that for "used".
static DiskLoc max() {
return DiskLoc(0x7fffffff, 0x7ffffffe);
}
@@ -96,8 +100,9 @@ public:
}
DiskLoc& Null() {
_a = -1;
- ofs =
- 0; /* note NullOfs is different. todo clean up. see refs to NullOfs in code - use is valid but outside DiskLoc context so confusing as-is. */
+ /* note NullOfs is different. todo clean up. see refs to NullOfs in code - use is valid but
+ * outside DiskLoc context so confusing as-is. */
+ ofs = 0;
return *this;
}
void assertOk() const {
diff --git a/src/mongo/db/storage/mmap_v1/dur.cpp b/src/mongo/db/storage/mmap_v1/dur.cpp
index 21c729eea17..a17a7a80d51 100644
--- a/src/mongo/db/storage/mmap_v1/dur.cpp
+++ b/src/mongo/db/storage/mmap_v1/dur.cpp
@@ -34,19 +34,21 @@
we could be in read lock for this
for very large objects write directly to redo log in situ?
WRITETOJOURNAL
- we could be unlocked (the main db lock that is...) for this, with sufficient care, but there is some complexity
- have to handle falling behind which would use too much ram (going back into a read lock would suffice to stop that).
- for now (1.7.5/1.8.0) we are in read lock which is not ideal.
+ we could be unlocked (the main db lock that is...) for this, with sufficient care, but there
+ is some complexity: we have to handle falling behind, which would use too much ram (going back
+ into a read lock would suffice to stop that). for now (1.7.5/1.8.0) we are in read lock, which
+ is not ideal.
WRITETODATAFILES
- actually write to the database data files in this phase. currently done by memcpy'ing the writes back to
- the non-private MMF. alternatively one could write to the files the traditional way; however the way our
- storage engine works that isn't any faster (actually measured a tiny bit slower).
+ actually write to the database data files in this phase. currently done by memcpy'ing the
+ writes back to the non-private MMF. alternatively one could write to the files the
+ traditional way; however the way our storage engine works that isn't any faster (actually
+ measured a tiny bit slower).
REMAPPRIVATEVIEW
- we could in a write lock quickly flip readers back to the main view, then stay in read lock and do our real
- remapping. with many files (e.g., 1000), remapping could be time consuming (several ms), so we don't want
- to be too frequent.
- there could be a slow down immediately after remapping as fresh copy-on-writes for commonly written pages will
- be required. so doing these remaps fractionally is helpful.
+ we could in a write lock quickly flip readers back to the main view, then stay in read lock
+ and do our real remapping. with many files (e.g., 1000), remapping could be time consuming
+ (several ms), so we don't want to be too frequent. there could be a slow down immediately
+ after remapping as fresh copy-on-writes for commonly written pages will
+ be required. so doing these remaps fractionally is helpful.
mutexes:
diff --git a/src/mongo/db/storage/mmap_v1/dur_journalformat.h b/src/mongo/db/storage/mmap_v1/dur_journalformat.h
index 3c31c2686dd..964c0b79b9b 100644
--- a/src/mongo/db/storage/mmap_v1/dur_journalformat.h
+++ b/src/mongo/db/storage/mmap_v1/dur_journalformat.h
@@ -49,11 +49,12 @@ struct JHeader {
JHeader() {}
JHeader(std::string fname);
- char magic
- [2]; // "j\n". j means journal, then a linefeed, fwiw if you were to run "less" on the file or something...
+ // "j\n". j means journal, then a linefeed, fwiw if you were to run "less" on the file or
+ // something...
+ char magic[2];
-// x4142 is asci--readable if you look at the file with head/less -- thus the starting values were near
-// that. simply incrementing the version # is safe on a fwd basis.
+// x4142 is ascii-readable if you look at the file with head/less -- thus the starting values were
+// near that. simply incrementing the version # is safe on a fwd basis.
#if defined(_NOCOMPRESS)
enum { CurrentVersion = 0x4148 };
#else
@@ -62,15 +63,15 @@ struct JHeader {
unsigned short _version;
// these are just for diagnostic ease (make header more useful as plain text)
- char n1; // '\n'
- char ts[20]; // ascii timestamp of file generation. for user reading, not used by code.
- char n2; // '\n'
- char dbpath
- [128]; // path/filename of this file for human reading and diagnostics. not used by code.
- char n3, n4; // '\n', '\n'
+ char n1; // '\n'
+ char ts[20]; // ascii timestamp of file generation. for user reading, not used by code.
+ char n2; // '\n'
+ char dbpath[128]; // path/filename of this file for human reading and diagnostics. not used
+ // by code.
+ char n3, n4; // '\n', '\n'
- unsigned long long
- fileId; // unique identifier that will be in each JSectHeader. important as we recycle prealloced files
+ unsigned long long fileId; // unique identifier that will be in each JSectHeader.
+ // important as we recycle prealloced files
char reserved3[8026]; // 8KB total for the file header
char txt2[2]; // "\n\n" at the end
@@ -112,7 +113,8 @@ public:
};
/** an individual write operation within a group commit section. Either the entire section should
- be applied, or nothing. (We check the md5 for the whole section before doing anything on recovery.)
+ be applied, or nothing. (We check the md5 for the whole section before doing anything on
+ recovery.)
*/
struct JEntry {
enum OpCodes {
diff --git a/src/mongo/db/storage/mmap_v1/dur_journalimpl.h b/src/mongo/db/storage/mmap_v1/dur_journalimpl.h
index 86a2d19de97..77e79ccb8d1 100644
--- a/src/mongo/db/storage/mmap_v1/dur_journalimpl.h
+++ b/src/mongo/db/storage/mmap_v1/dur_journalimpl.h
@@ -108,8 +108,8 @@ private:
static void preFlush();
static void postFlush();
unsigned long long _preFlushTime;
- unsigned long long
- _lastFlushTime; // data < this time is fsynced in the datafiles (unless hard drive controller is caching)
+ // data < this time is fsynced in the datafiles (unless hard drive controller is caching)
+ unsigned long long _lastFlushTime;
bool _writeToLSNNeeded;
void updateLSNFile();
};
diff --git a/src/mongo/db/storage/mmap_v1/dur_preplogbuffer.cpp b/src/mongo/db/storage/mmap_v1/dur_preplogbuffer.cpp
index dc9d7fb2b7a..10651cc1ae8 100644
--- a/src/mongo/db/storage/mmap_v1/dur_preplogbuffer.cpp
+++ b/src/mongo/db/storage/mmap_v1/dur_preplogbuffer.cpp
@@ -68,7 +68,10 @@ static DurableMappedFile* findMMF_inlock(void* ptr, size_t& ofs) {
DurableMappedFile* f = privateViews.find_inlock(ptr, ofs);
if (f == 0) {
error() << "findMMF_inlock failed " << privateViews.numberOfViews_inlock() << endl;
- printStackTrace(); // we want a stack trace and the assert below didn't print a trace once in the real world - not sure why
+
+ // we want a stack trace and the assert below didn't print a trace once in the real world
+ // - not sure why
+ printStackTrace();
stringstream ss;
ss << "view pointer cannot be resolved " << std::hex << (size_t)ptr;
journalingFailure(ss.str().c_str()); // asserts, which then abends
diff --git a/src/mongo/db/storage/mmap_v1/dur_recover.cpp b/src/mongo/db/storage/mmap_v1/dur_recover.cpp
index a6958ad1aec..209acc92cea 100644
--- a/src/mongo/db/storage/mmap_v1/dur_recover.cpp
+++ b/src/mongo/db/storage/mmap_v1/dur_recover.cpp
@@ -207,7 +207,8 @@ public:
_entries->skip(len + 1); // skip '\0' too
_entries->read(lenOrOpCode); // read this for the fall through
}
- // fall through as a basic operation always follows jdbcontext, and we don't have anything to return yet
+ // fall through as a basic operation always follows jdbcontext, and we don't have
+ // anything to return yet
default:
// fall through
@@ -517,7 +518,8 @@ bool RecoveryJob::processFile(boost::filesystem::path journalfile) {
return true;
}
} catch (...) {
- // if something weird like a permissions problem keep going so the massert down below can happen (presumably)
+ // if something weird like a permissions problem keep going so the massert down below can
+ // happen (presumably)
log() << "recover exception checking filesize" << endl;
}
diff --git a/src/mongo/db/storage/mmap_v1/dur_stats.h b/src/mongo/db/storage/mmap_v1/dur_stats.h
index 8ec6f8c024f..0b3daf7f021 100644
--- a/src/mongo/db/storage/mmap_v1/dur_stats.h
+++ b/src/mongo/db/storage/mmap_v1/dur_stats.h
@@ -33,9 +33,11 @@
namespace mongo {
namespace dur {
-/** journaling stats. the model here is that the commit thread is the only writer, and that reads are
- uncommon (from a serverStatus command and such). Thus, there should not be multicore chatter overhead.
-*/
+/**
+ * journaling stats. the model here is that the commit thread is the only writer, and that reads
+ * are uncommon (from a serverStatus command and such). Thus, there should not be multicore chatter
+ * overhead.
+ */
struct Stats {
struct S {
std::string _CSVHeader() const;
diff --git a/src/mongo/db/storage/mmap_v1/durable_mapped_file.cpp b/src/mongo/db/storage/mmap_v1/durable_mapped_file.cpp
index fad28753372..967f1c92a43 100644
--- a/src/mongo/db/storage/mmap_v1/durable_mapped_file.cpp
+++ b/src/mongo/db/storage/mmap_v1/durable_mapped_file.cpp
@@ -28,9 +28,11 @@
* it in the license file.
*/
-/* this module adds some of our layers atop memory mapped files - specifically our handling of private views & such
- if you don't care about journaling/durability (temp sort files & such) use MemoryMappedFile class, not this.
-*/
+/**
+ * this module adds some of our layers atop memory mapped files - specifically our handling of
+ * private views & such. if you don't care about journaling/durability (temp sort files & such) use
+ * MemoryMappedFile class, not this.
+ */
#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kStorage
@@ -275,9 +277,8 @@ bool DurableMappedFile::finishOpening() {
"(look in log for "
"more information)");
}
- privateViews.add_inlock(
- _view_private,
- this); // note that testIntent builds use this, even though it points to view_write then...
+ // note that testIntent builds use this, even though it points to view_write then...
+ privateViews.add_inlock(_view_private, this);
} else {
_view_private = _view_write;
}
diff --git a/src/mongo/db/storage/mmap_v1/durable_mapped_file.h b/src/mongo/db/storage/mmap_v1/durable_mapped_file.h
index 02906f112fe..2697613890b 100644
--- a/src/mongo/db/storage/mmap_v1/durable_mapped_file.h
+++ b/src/mongo/db/storage/mmap_v1/durable_mapped_file.h
@@ -37,10 +37,11 @@
namespace mongo {
-/** DurableMappedFile adds some layers atop memory mapped files - specifically our handling of private views & such.
- if you don't care about journaling/durability (temp sort files & such) use MemoryMappedFile class,
- not this.
-*/
+/**
+ * DurableMappedFile adds some layers atop memory mapped files - specifically our handling of
+ * private views & such. if you don't care about journaling/durability (temp sort files & such) use
+ * MemoryMappedFile class, not this.
+ */
class DurableMappedFile : private MemoryMappedFile {
protected:
virtual void* viewForFlushing() {
@@ -276,6 +277,7 @@ inline void PointerToDurableMappedFile::makeWritable(void* privateView, unsigned
inline void PointerToDurableMappedFile::makeWritable(void* _p, unsigned len) {}
#endif
-// allows a pointer into any private view of a DurableMappedFile to be resolved to the DurableMappedFile object
+// allows a pointer into any private view of a DurableMappedFile to be resolved to the
+// DurableMappedFile object
extern PointerToDurableMappedFile privateViews;
}
diff --git a/src/mongo/db/storage/mmap_v1/durop.cpp b/src/mongo/db/storage/mmap_v1/durop.cpp
index 8efd7720c3e..0ea1949ad12 100644
--- a/src/mongo/db/storage/mmap_v1/durop.cpp
+++ b/src/mongo/db/storage/mmap_v1/durop.cpp
@@ -135,9 +135,9 @@ bool FileCreatedOp::needFilesClosed() {
}
void FileCreatedOp::replay() {
- // i believe the code assumes new files are filled with zeros. thus we have to recreate the file,
- // or rewrite at least, even if it were the right length. perhaps one day we should change that
- // although easier to avoid defects if we assume it is zeros perhaps.
+ // i believe the code assumes new files are filled with zeros. thus we have to recreate the
+ // file, or rewrite at least, even if it were the right length. perhaps one day we should
+ // change that although easier to avoid defects if we assume it is zeros perhaps.
string full = _p.asFullPath();
if (boost::filesystem::exists(full)) {
try {
diff --git a/src/mongo/db/storage/mmap_v1/durop.h b/src/mongo/db/storage/mmap_v1/durop.h
index a798f210616..50ddc33318a 100644
--- a/src/mongo/db/storage/mmap_v1/durop.h
+++ b/src/mongo/db/storage/mmap_v1/durop.h
@@ -43,9 +43,9 @@ namespace dur {
/** DurOp - Operations we journal that aren't just basic writes.
*
- * Basic writes are logged as JEntry's, and indicated in ram temporarily as struct dur::WriteIntent.
- * We don't make WriteIntent inherit from DurOp to keep it as lean as possible as there will be millions of
- * them (we don't want a vtable for example there).
+ * Basic writes are logged as JEntry's, and indicated in ram temporarily as struct
+ * dur::WriteIntent. We don't make WriteIntent inherit from DurOp to keep it as lean as possible as
+ * there will be millions of them (we don't want a vtable for example there).
*
* For each op we want to journal, we define a subclass.
*/
diff --git a/src/mongo/db/storage/mmap_v1/extent.h b/src/mongo/db/storage/mmap_v1/extent.h
index 9d6d3935346..16af89fb42b 100644
--- a/src/mongo/db/storage/mmap_v1/extent.h
+++ b/src/mongo/db/storage/mmap_v1/extent.h
@@ -42,7 +42,8 @@ namespace mongo {
/* extents are datafile regions where all the records within the region
belong to the same namespace.
-(11:12:35 AM) dm10gen: when the extent is allocated, all its empty space is stuck into one big DeletedRecord
+(11:12:35 AM) dm10gen: when the extent is allocated, all its empty space is stuck into one big
+ DeletedRecord
(11:12:55 AM) dm10gen: and that is placed on the free list
*/
#pragma pack(1)
diff --git a/src/mongo/db/storage/mmap_v1/extent_manager.h b/src/mongo/db/storage/mmap_v1/extent_manager.h
index 6151f8e11a2..052634d639b 100644
--- a/src/mongo/db/storage/mmap_v1/extent_manager.h
+++ b/src/mongo/db/storage/mmap_v1/extent_manager.h
@@ -106,9 +106,9 @@ public:
/**
* @param loc - has to be for a specific MmapV1RecordHeader
* Note(erh): this sadly cannot be removed.
- * A MmapV1RecordHeader DiskLoc has an offset from a file, while a RecordStore really wants an offset
- * from an extent. This intrinsically links an original record store to the original extent
- * manager.
+ * A MmapV1RecordHeader DiskLoc has an offset from a file, while a RecordStore really wants an
+ * offset from an extent. This intrinsically links an original record store to the original
+ * extent manager.
*/
virtual MmapV1RecordHeader* recordForV1(const DiskLoc& loc) const = 0;
diff --git a/src/mongo/db/storage/mmap_v1/file_allocator.cpp b/src/mongo/db/storage/mmap_v1/file_allocator.cpp
index 0500ad43a83..f033e1c6a5a 100644
--- a/src/mongo/db/storage/mmap_v1/file_allocator.cpp
+++ b/src/mongo/db/storage/mmap_v1/file_allocator.cpp
@@ -317,7 +317,8 @@ void FileAllocator::ensureLength(int fd, long size) {
void FileAllocator::checkFailure() {
if (_failed) {
- // we want to log the problem (diskfull.js expects it) but we do not want to dump a stack tracke
+ // we want to log the problem (diskfull.js expects it) but we do not want to dump a stack
+ // trace
msgassertedNoTrace(12520, "new file allocation failure");
}
}
diff --git a/src/mongo/db/storage/mmap_v1/journal_latency_test_cmd.cpp b/src/mongo/db/storage/mmap_v1/journal_latency_test_cmd.cpp
index 0f21961d459..d886eaf8b45 100644
--- a/src/mongo/db/storage/mmap_v1/journal_latency_test_cmd.cpp
+++ b/src/mongo/db/storage/mmap_v1/journal_latency_test_cmd.cpp
@@ -121,7 +121,8 @@ public:
sleepmillis(4);
}
long long y = t2.micros() - 4 * N * 1000;
- // not really trusting the timer granularity on all platforms so whichever is higher of x and y
+ // not really trusting the timer granularity on all platforms so whichever is higher
+ // of x and y
bb[pass].append("8KBWithPauses", max(x, y) / (N * 1000.0));
}
{
diff --git a/src/mongo/db/storage/mmap_v1/logfile.h b/src/mongo/db/storage/mmap_v1/logfile.h
index 4a3bb5535e2..abfb875ee4b 100644
--- a/src/mongo/db/storage/mmap_v1/logfile.h
+++ b/src/mongo/db/storage/mmap_v1/logfile.h
@@ -51,7 +51,8 @@ public:
*/
void synchronousAppend(const void* buf, size_t len);
- /** write at specified offset. must be aligned. noreturn until physically written. thread safe */
+ /** write at specified offset. must be aligned. noreturn until physically written. thread safe
+  */
void writeAt(unsigned long long offset, const void* _bug, size_t _len);
void readAt(unsigned long long offset, void* _buf, size_t _len);
diff --git a/src/mongo/db/storage/mmap_v1/mmap.h b/src/mongo/db/storage/mmap_v1/mmap.h
index ae9a0796a4b..6413dc26127 100644
--- a/src/mongo/db/storage/mmap_v1/mmap.h
+++ b/src/mongo/db/storage/mmap_v1/mmap.h
@@ -123,8 +123,10 @@ public:
template <class F>
static void forEach(F fun);
- /** note: you need to be in mmmutex when using this. forEach (above) handles that for you automatically.
-*/
+ /**
+ * note: you need to be in mmmutex when using this. forEach (above) handles that for you
+ * automatically.
+ */
static std::set<MongoFile*>& getAllFiles();
// callbacks if you need them
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp b/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp
index 69d80422e66..d7c44aabfab 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp
@@ -444,8 +444,8 @@ DiskLoc MmapV1ExtentManager::_allocFromFreeList(OperationContext* txn,
break;
}
if (t.seconds() >= 2) {
- // have spent lots of time in write lock, and we are in [low,high], so close enough
- // could come into play if extent freelist is very long
+ // have spent lots of time in write lock, and we are in [low,high], so close
+ // enough could come into play if extent freelist is very long
break;
}
} else {
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.h b/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.h
index 1f7a0963aa1..a2f2931e1b4 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.h
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.h
@@ -113,9 +113,9 @@ public:
/**
* @param loc - has to be for a specific MmapV1RecordHeader
* Note(erh): this sadly cannot be removed.
- * A MmapV1RecordHeader DiskLoc has an offset from a file, while a RecordStore really wants an offset
- * from an extent. This intrinsically links an original record store to the original extent
- * manager.
+ * A MmapV1RecordHeader DiskLoc has an offset from a file, while a RecordStore really wants an
+ * offset from an extent. This intrinsically links an original record store to the original
+ * extent manager.
*/
MmapV1RecordHeader* recordForV1(const DiskLoc& loc) const;
diff --git a/src/mongo/db/storage/mmap_v1/mmap_windows.cpp b/src/mongo/db/storage/mmap_v1/mmap_windows.cpp
index 88abedd9c77..aba533e5844 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_windows.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap_windows.cpp
@@ -228,7 +228,8 @@ void* MemoryMappedFile::map(const char* filenameIn, unsigned long long& length,
verify(fd == 0 && len == 0); // can't open more than once
setFilename(filenameIn);
FileAllocator::get()->allocateAsap(filenameIn, length);
- /* big hack here: Babble uses db names with colons. doesn't seem to work on windows. temporary perhaps. */
+ /* big hack here: Babble uses db names with colons. doesn't seem to work on windows. temporary
+ * perhaps. */
char filename[256];
strncpy(filename, filenameIn, 255);
filename[255] = 0;
diff --git a/src/mongo/db/storage/mmap_v1/record.h b/src/mongo/db/storage/mmap_v1/record.h
index 0f3f9ebcdd4..a37d49101b1 100644
--- a/src/mongo/db/storage/mmap_v1/record.h
+++ b/src/mongo/db/storage/mmap_v1/record.h
@@ -42,13 +42,16 @@ class DeletedRecord;
/* MmapV1RecordHeader is a record in a datafile. DeletedRecord is similar but for deleted space.
*11:03:20 AM) dm10gen: regarding extentOfs...
-(11:03:42 AM) dm10gen: an extent is a continugous disk area, which contains many Records and DeleteRecords
+(11:03:42 AM) dm10gen: an extent is a contiguous disk area, which contains many Records and
+ DeleteRecords
(11:03:56 AM) dm10gen: a DiskLoc has two pieces, the fileno and ofs. (64 bit total)
-(11:04:16 AM) dm10gen: to keep the headesr small, instead of storing a 64 bit ptr to the full extent address, we keep just the offset
+(11:04:16 AM) dm10gen: to keep the headers small, instead of storing a 64 bit ptr to the full extent
+ address, we keep just the offset
(11:04:29 AM) dm10gen: we can do this as we know the record's address, and it has the same fileNo
(11:04:33 AM) dm10gen: see class DiskLoc for more info
(11:04:43 AM) dm10gen: so that is how MmapV1RecordHeader::myExtent() works
-(11:04:53 AM) dm10gen: on an alloc(), when we build a new MmapV1RecordHeader, we must populate its extentOfs then
+(11:04:53 AM) dm10gen: on an alloc(), when we build a new MmapV1RecordHeader, we must populate its
+ extentOfs then
*/
#pragma pack(1)
class MmapV1RecordHeader {
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp
index cc8cf582ffe..0e7d667f84f 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp
@@ -788,8 +788,8 @@ Status RecordStoreV1Base::validate(OperationContext* txn,
if (loc.questionable()) {
if (isCapped() && !loc.isValid() && i == 1) {
- /* the constructor for NamespaceDetails intentionally sets deletedList[1] to invalid
- see comments in namespace.h
+ /* the constructor for NamespaceDetails intentionally sets
+ * deletedList[1] to invalid see comments in namespace.h
*/
break;
}
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_simple.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_simple.cpp
index 5948553b9af..aa161cae41e 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_simple.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_simple.cpp
@@ -300,8 +300,8 @@ void SimpleRecordStoreV1::_compactExtent(OperationContext* txn,
fassert(17437, sourceExtent->validates(extentLoc));
{
- // The next/prev MmapV1RecordHeader pointers within the Extent might not be in order so we first
- // page in the whole Extent sequentially.
+ // The next/prev MmapV1RecordHeader pointers within the Extent might not be in order so we
+ // first page in the whole Extent sequentially.
// TODO benchmark on slow storage to verify this is measurably faster.
log() << "compact paging in len=" << sourceExtent->length / 1000000.0 << "MB" << endl;
Timer t;