author     Mark Benvenuto <mark.benvenuto@mongodb.com>  2015-06-19 10:57:36 -0400
committer  Mark Benvenuto <mark.benvenuto@mongodb.com>  2015-06-20 10:56:04 -0400
commit     6f6fa5a63d482b0dc117eb2ac21cf096deb5a6f3 (patch)
tree       b76c2a4dfc7f45eb25dd62cb3ffe89ea448d9e0e /src
parent     9c2ed42daa8fbbef4a919c21ec564e2db55e8d60 (diff)
download   mongo-6f6fa5a63d482b0dc117eb2ac21cf096deb5a6f3.tar.gz
SERVER-18978: Clang-Format - Fix comment word wrapping indentation
Diffstat (limited to 'src')
-rw-r--r--  src/mongo/bson/bsonelement.cpp | 3
-rw-r--r--  src/mongo/bson/bsonelement.h | 3
-rw-r--r--  src/mongo/bson/bsonmisc.h | 6
-rw-r--r--  src/mongo/bson/bsonobj.cpp | 12
-rw-r--r--  src/mongo/bson/bsonobj.h | 33
-rw-r--r--  src/mongo/bson/bsonobjbuilder.h | 14
-rw-r--r--  src/mongo/bson/util/builder.h | 13
-rw-r--r--  src/mongo/client/cyrus_sasl_client_session.cpp | 4
-rw-r--r--  src/mongo/client/dbclient.cpp | 10
-rw-r--r--  src/mongo/client/dbclient_rs.h | 6
-rw-r--r--  src/mongo/client/dbclientinterface.h | 106
-rw-r--r--  src/mongo/client/examples/mongoperf.cpp | 8
-rw-r--r--  src/mongo/client/parallel.cpp | 41
-rw-r--r--  src/mongo/crypto/tom/tomcrypt_cfg.h | 9
-rw-r--r--  src/mongo/crypto/tom/tomcrypt_custom.h | 3
-rw-r--r--  src/mongo/crypto/tom/tomcrypt_hash.h | 3
-rw-r--r--  src/mongo/db/auth/authorization_session.h | 3
-rw-r--r--  src/mongo/db/auth/role_graph_builtin_roles.cpp | 4
-rw-r--r--  src/mongo/db/commands/index_filter_commands_test.cpp | 3
-rw-r--r--  src/mongo/db/commands/mr.cpp | 3
-rw-r--r--  src/mongo/db/dbhelpers.h | 4
-rw-r--r--  src/mongo/db/dbmessage.h | 3
-rw-r--r--  src/mongo/db/geo/s2.h | 4
-rw-r--r--  src/mongo/db/matcher/expression_leaf.cpp | 3
-rw-r--r--  src/mongo/db/mongod_options.cpp | 4
-rw-r--r--  src/mongo/db/namespace_string.h | 3
-rw-r--r--  src/mongo/db/prefetch.cpp | 10
-rw-r--r--  src/mongo/db/query/get_executor.cpp | 4
-rw-r--r--  src/mongo/db/query/planner_access.h | 4
-rw-r--r--  src/mongo/db/query/query_planner_geo_test.cpp | 9
-rw-r--r--  src/mongo/db/repl/bgsync.cpp | 3
-rw-r--r--  src/mongo/db/repl/master_slave.cpp | 24
-rw-r--r--  src/mongo/db/repl/master_slave.h | 12
-rw-r--r--  src/mongo/db/repl/oplog.cpp | 4
-rw-r--r--  src/mongo/db/repl/repl_settings.h | 5
-rw-r--r--  src/mongo/db/repl/replication_info.cpp | 3
-rw-r--r--  src/mongo/db/storage/mmap_v1/aligned_builder.cpp | 4
-rw-r--r--  src/mongo/db/storage/mmap_v1/btree/btree_logic_test.cpp | 6
-rw-r--r--  src/mongo/db/storage/mmap_v1/btree/key.cpp | 6
-rw-r--r--  src/mongo/db/storage/mmap_v1/btree/key.h | 9
-rw-r--r--  src/mongo/db/storage/mmap_v1/catalog/namespace.h | 10
-rw-r--r--  src/mongo/db/storage/mmap_v1/catalog/namespace_details.h | 8
-rw-r--r--  src/mongo/db/storage/mmap_v1/data_file.h | 6
-rw-r--r--  src/mongo/db/storage/mmap_v1/diskloc.h | 21
-rw-r--r--  src/mongo/db/storage/mmap_v1/dur.cpp | 24
-rw-r--r--  src/mongo/db/storage/mmap_v1/dur_journalformat.h | 28
-rw-r--r--  src/mongo/db/storage/mmap_v1/dur_journalimpl.h | 4
-rw-r--r--  src/mongo/db/storage/mmap_v1/dur_preplogbuffer.cpp | 5
-rw-r--r--  src/mongo/db/storage/mmap_v1/dur_recover.cpp | 6
-rw-r--r--  src/mongo/db/storage/mmap_v1/dur_stats.h | 8
-rw-r--r--  src/mongo/db/storage/mmap_v1/durable_mapped_file.cpp | 13
-rw-r--r--  src/mongo/db/storage/mmap_v1/durable_mapped_file.h | 12
-rw-r--r--  src/mongo/db/storage/mmap_v1/durop.cpp | 6
-rw-r--r--  src/mongo/db/storage/mmap_v1/durop.h | 6
-rw-r--r--  src/mongo/db/storage/mmap_v1/extent.h | 3
-rw-r--r--  src/mongo/db/storage/mmap_v1/extent_manager.h | 6
-rw-r--r--  src/mongo/db/storage/mmap_v1/file_allocator.cpp | 3
-rw-r--r--  src/mongo/db/storage/mmap_v1/journal_latency_test_cmd.cpp | 3
-rw-r--r--  src/mongo/db/storage/mmap_v1/logfile.h | 3
-rw-r--r--  src/mongo/db/storage/mmap_v1/mmap.h | 6
-rw-r--r--  src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp | 4
-rw-r--r--  src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.h | 6
-rw-r--r--  src/mongo/db/storage/mmap_v1/mmap_windows.cpp | 3
-rw-r--r--  src/mongo/db/storage/mmap_v1/record.h | 9
-rw-r--r--  src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp | 4
-rw-r--r--  src/mongo/db/storage/mmap_v1/record_store_v1_simple.cpp | 4
-rw-r--r--  src/mongo/dbtests/jsobjtests.cpp | 8
-rw-r--r--  src/mongo/dbtests/jstests.cpp | 3
-rw-r--r--  src/mongo/dbtests/namespacetests.cpp | 546
-rw-r--r--  src/mongo/dbtests/perftests.cpp | 16
-rw-r--r--  src/mongo/dbtests/query_stage_and.cpp | 4
-rw-r--r--  src/mongo/dbtests/sharding.cpp | 25
-rw-r--r--  src/mongo/dbtests/threadedtests.cpp | 24
-rw-r--r--  src/mongo/s/balance.cpp | 4
-rw-r--r--  src/mongo/s/balance.h | 10
-rw-r--r--  src/mongo/s/catalog/legacy/distlock.cpp | 74
-rw-r--r--  src/mongo/s/catalog/legacy/distlock.h | 16
-rw-r--r--  src/mongo/s/catalog/type_changelog.h | 4
-rw-r--r--  src/mongo/s/chunk.cpp | 3
-rw-r--r--  src/mongo/s/chunk.h | 8
-rw-r--r--  src/mongo/s/chunk_manager.h | 3
-rw-r--r--  src/mongo/s/commands/cluster_add_shard_cmd.cpp | 3
-rw-r--r--  src/mongo/s/commands/cluster_db_stats_cmd.cpp | 3
-rw-r--r--  src/mongo/s/commands/commands_public.cpp | 3
-rw-r--r--  src/mongo/s/config.cpp | 14
-rw-r--r--  src/mongo/s/d_migrate.cpp | 27
-rw-r--r--  src/mongo/s/d_split.cpp | 42
-rw-r--r--  src/mongo/s/d_state.h | 4
-rw-r--r--  src/mongo/s/grid.cpp | 4
-rw-r--r--  src/mongo/s/server.cpp | 3
-rw-r--r--  src/mongo/scripting/bson_template_evaluator.h | 3
-rw-r--r--  src/mongo/scripting/bson_template_evaluator_test.cpp | 3
-rw-r--r--  src/mongo/shell/linenoise.cpp | 61
-rw-r--r--  src/mongo/shell/linenoise_utf8.cpp | 17
-rw-r--r--  src/mongo/shell/linenoise_utf8.h | 35
-rw-r--r--  src/mongo/shell/shell_utils_launcher.cpp | 5
-rw-r--r--  src/mongo/stdx/functional.h | 3
-rw-r--r--  src/mongo/util/assert_util.h | 15
-rw-r--r--  src/mongo/util/background.h | 4
-rw-r--r--  src/mongo/util/concurrency/value.h | 3
-rw-r--r--  src/mongo/util/debugger.cpp | 3
-rw-r--r--  src/mongo/util/descriptive_stats-inl.h | 10
-rw-r--r--  src/mongo/util/moveablebuffer.h | 11
-rw-r--r--  src/mongo/util/net/listen.h | 3
-rw-r--r--  src/mongo/util/net/message.h | 6
-rw-r--r--  src/mongo/util/net/message_port.cpp | 3
-rw-r--r--  src/mongo/util/net/message_port.h | 7
-rw-r--r--  src/mongo/util/net/miniwebserver.h | 17
-rw-r--r--  src/mongo/util/ntservice.cpp | 16
-rw-r--r--  src/mongo/util/options_parser/option_description.h | 5
-rw-r--r--  src/mongo/util/options_parser/options_parser.h | 3
-rw-r--r--  src/mongo/util/processinfo_linux.cpp | 31
-rw-r--r--  src/mongo/util/processinfo_windows.cpp | 4
-rw-r--r--  src/mongo/util/progress_meter.h | 4
-rw-r--r--  src/mongo/util/safe_num.cpp | 8
-rw-r--r--  src/mongo/util/startup_test.h | 15
-rw-r--r--  src/mongo/util/winutil.h | 3
117 files changed, 989 insertions, 782 deletions
diff --git a/src/mongo/bson/bsonelement.cpp b/src/mongo/bson/bsonelement.cpp
index 799da76b11d..72d104b15d6 100644
--- a/src/mongo/bson/bsonelement.cpp
+++ b/src/mongo/bson/bsonelement.cpp
@@ -843,7 +843,8 @@ int compareElementValues(const BSONElement& l, const BSONElement& r) {
case Bool:
return *l.value() - *r.value();
case bsonTimestamp:
- // unsigned compare for timestamps - note they are not really dates but (ordinal + time_t)
+ // unsigned compare for timestamps - note they are not really dates but (ordinal +
+ // time_t)
if (l.date() < r.date())
return -1;
return l.date() == r.date() ? 0 : 1;
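[Note on the hunk above: the rewrapped comment is terse. A BSON timestamp packs the seconds into the high 32 bits and the ordinal into the low 32 bits of a 64-bit value, so a single unsigned 64-bit comparison orders by time first, then by ordinal. A minimal standalone sketch of that semantics, not the driver's code:]

    #include <cstdint>

    // Orders timestamps the way compareElementValues does: an unsigned compare
    // of (seconds << 32 | ordinal) sorts by time first, then by ordinal.
    int compareTimestamps(std::uint64_t l, std::uint64_t r) {
        if (l < r)
            return -1;
        return l == r ? 0 : 1;
    }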
diff --git a/src/mongo/bson/bsonelement.h b/src/mongo/bson/bsonelement.h
index 9ee5513ddf9..92ddf87600a 100644
--- a/src/mongo/bson/bsonelement.h
+++ b/src/mongo/bson/bsonelement.h
@@ -685,7 +685,8 @@ inline double BSONElement::numberDouble() const {
}
}
-/** Retrieve int value for the element safely. Zero returned if not a number. Converted to int if another numeric type. */
+/** Retrieve int value for the element safely. Zero returned if not a number. Converted to int if
+ * another numeric type. */
inline int BSONElement::numberInt() const {
switch (type()) {
case NumberDouble:
diff --git a/src/mongo/bson/bsonmisc.h b/src/mongo/bson/bsonmisc.h
index baa7abad5e7..d1c1894190a 100644
--- a/src/mongo/bson/bsonmisc.h
+++ b/src/mongo/bson/bsonmisc.h
@@ -83,7 +83,8 @@ enum FieldCompareResult {
/** Use BSON_ARRAY macro like BSON macro, but without keys
- BSONArray arr = BSON_ARRAY( "hello" << 1 << BSON( "foo" << BSON_ARRAY( "bar" << "baz" << "qux" ) ) );
+ BSONArray arr = BSON_ARRAY( "hello" << 1 <<
+ BSON( "foo" << BSON_ARRAY( "bar" << "baz" << "qux" ) ) );
*/
#define BSON_ARRAY(x) ((::mongo::BSONArrayBuilder() << x).arr())
@@ -264,7 +265,8 @@ private:
};
/**
- used in conjuction with BSONObjBuilder, allows for proper buffer size to prevent crazy memory usage
+ used in conjuction with BSONObjBuilder, allows for proper buffer size to prevent crazy memory
+ usage
*/
class BSONSizeTracker {
public:
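[The rewrapped BSON_ARRAY example above compiles as written; for reference, a self-contained sketch assuming the legacy mongo C++ BSON headers:]

    #include <iostream>
    #include "mongo/bson/bsonmisc.h"
    #include "mongo/bson/bsonobjbuilder.h"

    int main() {
        mongo::BSONArray arr = BSON_ARRAY("hello" << 1
                                          << BSON("foo" << BSON_ARRAY("bar" << "baz" << "qux")));
        std::cout << arr.toString() << std::endl;  // [ "hello", 1, { foo: [ ... ] } ]
        return 0;
    }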
diff --git a/src/mongo/bson/bsonobj.cpp b/src/mongo/bson/bsonobj.cpp
index 0c9f8f01636..2cd2ef64bde 100644
--- a/src/mongo/bson/bsonobj.cpp
+++ b/src/mongo/bson/bsonobj.cpp
@@ -177,12 +177,12 @@ int BSONObj::woCompare(const BSONObj& r, const BSONObj& idxKey, bool considerFie
int x;
/*
- if( ordered && o.type() == String && strcmp(o.valuestr(), "ascii-proto") == 0 &&
- l.type() == String && r.type() == String ) {
- // note: no negative support yet, as this is just sort of a POC
- x = _stricmp(l.valuestr(), r.valuestr());
- }
- else*/ {
+ if( ordered && o.type() == String && strcmp(o.valuestr(), "ascii-proto") == 0 &&
+ l.type() == String && r.type() == String ) {
+ // note: no negative support yet, as this is just sort of a POC
+ x = _stricmp(l.valuestr(), r.valuestr());
+ }
+ else*/ {
x = l.woCompare(r, considerFieldName);
if (ordered && o.number() < 0)
x = -x;
diff --git a/src/mongo/bson/bsonobj.h b/src/mongo/bson/bsonobj.h
index 269278bef92..466ee1521f1 100644
--- a/src/mongo/bson/bsonobj.h
+++ b/src/mongo/bson/bsonobj.h
@@ -147,22 +147,22 @@ public:
A BSONObj can use a buffer it "owns" or one it does not.
OWNED CASE
- If the BSONObj owns the buffer, the buffer can be shared among several BSONObj's (by assignment).
- In this case the buffer is basically implemented as a shared_ptr.
+ If the BSONObj owns the buffer, the buffer can be shared among several BSONObj's (by
+ assignment). In this case the buffer is basically implemented as a shared_ptr.
Since BSONObj's are typically immutable, this works well.
UNOWNED CASE
- A BSONObj can also point to BSON data in some other data structure it does not "own" or free later.
- For example, in a memory mapped file. In this case, it is important the original data stays in
- scope for as long as the BSONObj is in use. If you think the original data may go out of scope,
- call BSONObj::getOwned() to promote your BSONObj to having its own copy.
+ A BSONObj can also point to BSON data in some other data structure it does not "own" or free
+ later. For example, in a memory mapped file. In this case, it is important the original data
+ stays in scope for as long as the BSONObj is in use. If you think the original data may go
+ out of scope, call BSONObj::getOwned() to promote your BSONObj to having its own copy.
On a BSONObj assignment, if the source is unowned, both the source and dest will have unowned
pointers to the original buffer after the assignment.
If you are not sure about ownership but need the buffer to last as long as the BSONObj, call
- getOwned(). getOwned() is a no-op if the buffer is already owned. If not already owned, a malloc
- and memcpy will result.
+ getOwned(). getOwned() is a no-op if the buffer is already owned. If not already owned, a
+ malloc and memcpy will result.
Most ways to create BSONObj's create 'owned' variants. Unowned versions can be created with:
(1) specifying true for the ifree parameter in the constructor
@@ -240,8 +240,8 @@ public:
/** Get several fields at once. This is faster than separate getField() calls as the size of
elements iterated can then be calculated only once each.
@param n number of fieldNames, and number of elements in the fields array
- @param fields if a field is found its element is stored in its corresponding position in this array.
- if not found the array element is unchanged.
+ @param fields if a field is found its element is stored in its corresponding position in
+ this array. if not found the array element is unchanged.
*/
void getFields(unsigned n, const char** fieldNames, BSONElement* fields) const;
@@ -459,9 +459,9 @@ public:
return BSONElement(objdata() + 4);
}
- /** faster than firstElement().fieldName() - for the first element we can easily find the fieldname without
- computing the element size.
- */
+ /** faster than firstElement().fieldName() - for the first element we can easily find the
+ * fieldname without computing the element size.
+ */
const char* firstElementFieldName() const {
const char* p = objdata() + 4;
return *p == EOO ? "" : p + 1;
@@ -655,12 +655,15 @@ public:
return _pos < _theend;
}
- /** @return true if more elements exist to be enumerated INCLUDING the EOO element which is always at the end. */
+ /** @return true if more elements exist to be enumerated INCLUDING the EOO element which is
+ * always at the end. */
bool moreWithEOO() {
return _pos <= _theend;
}
- /** @return the next element in the object. For the final element, element.eoo() will be true. */
+ /**
+ * @return the next element in the object. For the final element, element.eoo() will be true.
+ */
BSONElement next(bool checkEnd) {
verify(_pos <= _theend);
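[The OWNED/UNOWNED discussion rewrapped above is the central contract of BSONObj. A minimal sketch of getOwned() under the legacy headers — BSONObj(const char*) constructs an unowned view, and verify() is the server's assertion macro:]

    mongo::BSONObj owned = BSON("a" << 1);   // builder output: owns its buffer
    mongo::BSONObj view(owned.objdata());    // unowned view of the same bytes
    // If 'owned' may go out of scope while 'view' is still in use, promote it:
    mongo::BSONObj safe = view.getOwned();   // no-op if already owned; else malloc + memcpy
    verify(safe.isOwned());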
diff --git a/src/mongo/bson/bsonobjbuilder.h b/src/mongo/bson/bsonobjbuilder.h
index 76ed3f60ec8..87c9f3f876a 100644
--- a/src/mongo/bson/bsonobjbuilder.h
+++ b/src/mongo/bson/bsonobjbuilder.h
@@ -78,7 +78,8 @@ public:
}
/** @param baseBuilder construct a BSONObjBuilder using an existing BufBuilder
- * This is for more efficient adding of subobjects/arrays. See docs for subobjStart for example.
+ * This is for more efficient adding of subobjects/arrays. See docs for subobjStart for
+ * example.
*/
BSONObjBuilder(BufBuilder& baseBuilder)
: _b(baseBuilder),
@@ -129,16 +130,16 @@ public:
/** append element to the object we are building */
BSONObjBuilder& append(const BSONElement& e) {
- verify(
- !e.eoo()); // do not append eoo, that would corrupt us. the builder auto appends when done() is called.
+ // do not append eoo, that would corrupt us. the builder auto appends when done() is called.
+ verify(!e.eoo());
_b.appendBuf((void*)e.rawdata(), e.size());
return *this;
}
/** append an element but with a new name */
BSONObjBuilder& appendAs(const BSONElement& e, StringData fieldName) {
- verify(
- !e.eoo()); // do not append eoo, that would corrupt us. the builder auto appends when done() is called.
+ // do not append eoo, that would corrupt us. the builder auto appends when done() is called.
+ verify(!e.eoo());
_b.appendNum((char)e.type());
_b.appendStr(fieldName);
_b.appendBuf((void*)e.value(), e.valuesize());
@@ -677,7 +678,8 @@ public:
return false;
}
- /** @return true if we are using our own bufbuilder, and not an alternate that was given to us in our constructor */
+ /** @return true if we are using our own bufbuilder, and not an alternate that was given to us
+ * in our constructor */
bool owned() const {
return &_b == &_buf;
}
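[The comments re-homed in the hunks above guard against appending the EOO terminator by hand: obj()/done() adds it exactly once. A short usage sketch:]

    mongo::BSONObjBuilder b;
    b.append("name", "x");
    mongo::BSONObj src = BSON("age" << 33);
    b.appendAs(src["age"], "years");  // re-key an element from another object
    mongo::BSONObj out = b.obj();     // finishes the object; the single EOO byte is appended here
    // Appending src["missing"] (an EOO element) would trip the verify() above instead.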
diff --git a/src/mongo/bson/util/builder.h b/src/mongo/bson/util/builder.h
index bf0668a6da5..5878a2ed386 100644
--- a/src/mongo/bson/util/builder.h
+++ b/src/mongo/bson/util/builder.h
@@ -46,9 +46,9 @@
namespace mongo {
/* Accessing unaligned doubles on ARM generates an alignment trap and aborts with SIGBUS on Linux.
- Wrapping the double in a packed struct forces gcc to generate code that works with unaligned values too.
- The generated code for other architectures (which already allow unaligned accesses) is the same as if
- there was a direct pointer access.
+ Wrapping the double in a packed struct forces gcc to generate code that works with unaligned
+ values too. The generated code for other architectures (which already allow unaligned accesses)
+ is the same as if there was a direct pointer access.
*/
struct PackedDouble {
double d;
@@ -58,8 +58,8 @@ struct PackedDouble {
/* Note the limit here is rather arbitrary and is simply a standard. generally the code works
with any object that fits in ram.
- Also note that the server has some basic checks to enforce this limit but those checks are not exhaustive
- for example need to check for size too big after
+ Also note that the server has some basic checks to enforce this limit but those checks are not
+ exhaustive for example need to check for size too big after
update $push (append) operation
various db.eval() type operations
*/
@@ -165,7 +165,8 @@ public:
}
/** leave room for some stuff later
- @return point to region that was skipped. pointer may change later (on realloc), so for immediate use only
+ @return point to region that was skipped. pointer may change later (on realloc), so for
+ immediate use only
*/
char* skip(int n) {
return grow(n);
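[The PackedDouble comment rewrapped above describes a real portability trick. A standalone illustration, using gcc/clang attribute syntax (MongoDB's actual definition lives in this header):]

    #include <cstdio>
    #include <cstring>

    // Wrapping the double in a packed struct forces the compiler to emit
    // unaligned-safe loads on architectures (e.g. ARM) that would otherwise
    // raise SIGBUS on a misaligned direct pointer access.
    struct PackedDouble {
        double d;
    } __attribute__((packed));

    int main() {
        char buf[sizeof(double) + 1];
        double v = 3.14;
        std::memcpy(buf + 1, &v, sizeof v);  // store at an odd (unaligned) offset

        const PackedDouble* pd = reinterpret_cast<const PackedDouble*>(buf + 1);
        std::printf("%f\n", pd->d);          // safe read through the packed struct
        return 0;
    }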
diff --git a/src/mongo/client/cyrus_sasl_client_session.cpp b/src/mongo/client/cyrus_sasl_client_session.cpp
index c47d912c7c1..22875070e23 100644
--- a/src/mongo/client/cyrus_sasl_client_session.cpp
+++ b/src/mongo/client/cyrus_sasl_client_session.cpp
@@ -149,8 +149,8 @@ MONGO_INITIALIZER_WITH_PREREQUISITES(CyrusSaslClientContext,
}
/**
- * Callback registered on the sasl_conn_t underlying a CyrusSaslClientSession to allow the Cyrus SASL
- * library to query for the authentication id and other simple string configuration parameters.
+ * Callback registered on the sasl_conn_t underlying a CyrusSaslClientSession to allow the Cyrus
+ * SASL library to query for the authentication id and other simple string configuration parameters.
*
* Note that in Mongo, the authentication and authorization ids (authid and authzid) are always
* the same. These correspond to SASL_CB_AUTHNAME and SASL_CB_USER.
diff --git a/src/mongo/client/dbclient.cpp b/src/mongo/client/dbclient.cpp
index f1ebb144944..53e0ddc548d 100644
--- a/src/mongo/client/dbclient.cpp
+++ b/src/mongo/client/dbclient.cpp
@@ -851,17 +851,17 @@ bool DBClientWithCommands::exists(const string& ns) {
void DBClientConnection::_auth(const BSONObj& params) {
if (autoReconnect) {
- /* note we remember the auth info before we attempt to auth -- if the connection is broken, we will
- then have it for the next autoreconnect attempt.
- */
+ /* note we remember the auth info before we attempt to auth -- if the connection is broken,
+ * we will then have it for the next autoreconnect attempt.
+ */
authCache[params[saslCommandUserDBFieldName].str()] = params.getOwned();
}
DBClientBase::_auth(params);
}
-/** query N objects from the database into an array. makes sense mostly when you want a small number of results. if a huge number, use
- query() and iterate the cursor.
+/** query N objects from the database into an array. makes sense mostly when you want a small
+ * number of results. if a huge number, use query() and iterate the cursor.
*/
void DBClientInterface::findN(vector<BSONObj>& out,
const string& ns,
diff --git a/src/mongo/client/dbclient_rs.h b/src/mongo/client/dbclient_rs.h
index 3e5a9f07dd2..203e5eb003d 100644
--- a/src/mongo/client/dbclient_rs.h
+++ b/src/mongo/client/dbclient_rs.h
@@ -55,7 +55,8 @@ public:
using DBClientBase::update;
using DBClientBase::remove;
- /** Call connect() after constructing. autoReconnect is always on for DBClientReplicaSet connections. */
+ /** Call connect() after constructing. autoReconnect is always on for DBClientReplicaSet
+ * connections. */
DBClientReplicaSet(const std::string& name,
const std::vector<HostAndPort>& servers,
double so_timeout = 0);
@@ -134,7 +135,8 @@ public:
bool* retry = NULL,
std::string* targetHost = NULL);
- /* this is the callback from our underlying connections to notify us that we got a "not master" error.
+ /* this is the callback from our underlying connections to notify us that we got a "not master"
+ * error.
*/
void isntMaster();
diff --git a/src/mongo/client/dbclientinterface.h b/src/mongo/client/dbclientinterface.h
index a612777b603..bcbaa42d1da 100644
--- a/src/mongo/client/dbclientinterface.h
+++ b/src/mongo/client/dbclientinterface.h
@@ -46,13 +46,13 @@ namespace mongo {
/** the query field 'options' can have these bits set: */
enum QueryOptions {
- /** Tailable means cursor is not closed when the last data is retrieved. rather, the cursor marks
- the final object's position. you can resume using the cursor later, from where it was located,
- if more data were received. Set on dbQuery and dbGetMore.
+ /** Tailable means cursor is not closed when the last data is retrieved. rather, the cursor
+ * marks the final object's position. you can resume using the cursor later, from where it was
+ located, if more data were received. Set on dbQuery and dbGetMore.
like any "latent cursor", the cursor may become invalid at some point -- for example if that
- final object it references were deleted. Thus, you should be prepared to requery if you get back
- ResultFlag_CursorNotFound.
+ final object it references were deleted. Thus, you should be prepared to requery if you get
+ back ResultFlag_CursorNotFound.
*/
QueryOption_CursorTailable = 1 << 1,
@@ -70,21 +70,24 @@ enum QueryOptions {
// an extended period of time.
QueryOption_OplogReplay = 1 << 3,
- /** The server normally times out idle cursors after an inactivity period to prevent excess memory uses
+ /** The server normally times out idle cursors after an inactivity period to prevent excess
+ * memory uses
Set this option to prevent that.
*/
QueryOption_NoCursorTimeout = 1 << 4,
- /** Use with QueryOption_CursorTailable. If we are at the end of the data, block for a while rather
- than returning no data. After a timeout period, we do return as normal.
+ /** Use with QueryOption_CursorTailable. If we are at the end of the data, block for a while
+ * rather than returning no data. After a timeout period, we do return as normal.
*/
QueryOption_AwaitData = 1 << 5,
- /** Stream the data down full blast in multiple "more" packages, on the assumption that the client
- will fully read all data queried. Faster when you are pulling a lot of data and know you want to
- pull it all down. Note: it is not allowed to not read all the data unless you close the connection.
+ /** Stream the data down full blast in multiple "more" packages, on the assumption that the
+ * client will fully read all data queried. Faster when you are pulling a lot of data and know
+ * you want to pull it all down. Note: it is not allowed to not read all the data unless you
+ * close the connection.
- Use the query( stdx::function<void(const BSONObj&)> f, ... ) version of the connection's query()
+ Use the query( stdx::function<void(const BSONObj&)> f, ... ) version of the connection's
+ query()
method, and it will take care of all the details for you.
*/
QueryOption_Exhaust = 1 << 6,
@@ -151,7 +154,8 @@ enum ReservedOptions {
class DBClientCursor;
class DBClientCursorBatchIterator;
-/** Represents a Mongo query expression. Typically one uses the QUERY(...) macro to construct a Query object.
+/** Represents a Mongo query expression. Typically one uses the QUERY(...) macro to construct a
+ * Query object.
Examples:
QUERY( "age" << 33 << "school" << "UCLA" ).sort("name")
QUERY( "age" << GT << 30 << LT << 50 )
@@ -169,7 +173,8 @@ public:
Query(const char* json);
/** Add a sort (ORDER BY) criteria to the query expression.
- @param sortPattern the sort order template. For example to order by name ascending, time descending:
+ @param sortPattern the sort order template. For example to order by name ascending, time
+ descending:
{ name : 1, ts : -1 }
i.e.
BSON( "name" << 1 << "ts" << -1 )
@@ -205,14 +210,16 @@ public:
*/
Query& maxKey(const BSONObj& val);
- /** Return explain information about execution of this query instead of the actual query results.
- Normally it is easier to use the mongo shell to run db.find(...).explain().
- */
+ /** Return explain information about execution of this query instead of the actual query
+ * results.
+ * Normally it is easier to use the mongo shell to run db.find(...).explain().
+ */
Query& explain();
- /** Use snapshot mode for the query. Snapshot mode assures no duplicates are returned, or objects missed, which were
- present at both the start and end of the query's execution (if an object is new during the query, or deleted during
- the query, it may or may not be returned, even with snapshot mode).
+ /** Use snapshot mode for the query. Snapshot mode assures no duplicates are returned, or
+ * objects missed, which were present at both the start and end of the query's execution (if an
+ * object is new during the query, or deleted during the query, it may or may not be returned,
+ * even with snapshot mode).
Note that short query responses (less than 1MB) are always effectively snapshotted.
@@ -387,7 +394,8 @@ std::string nsGetCollection(const std::string& ns);
class DBConnector {
public:
virtual ~DBConnector() {}
- /** actualServer is set to the actual server where they call went if there was a choice (SlaveOk) */
+ /** actualServer is set to the actual server where they call went if there was a choice
+ * (SlaveOk) */
virtual bool call(Message& toSend,
Message& response,
bool assertOk = true,
@@ -454,8 +462,8 @@ public:
const BSONObj* fieldsToReturn = 0,
int queryOptions = 0);
- /** query N objects from the database into an array. makes sense mostly when you want a small number of results. if a huge number, use
- query() and iterate the cursor.
+ /** query N objects from the database into an array. makes sense mostly when you want a small
+ * number of results. if a huge number, use query() and iterate the cursor.
*/
void findN(std::vector<BSONObj>& out,
const std::string& ns,
@@ -549,8 +557,8 @@ public:
@param dbname database name. Use "admin" for global administrative commands.
@param cmd the command object to execute. For example, { ismaster : 1 }
- @param info the result object the database returns. Typically has { ok : ..., errmsg : ... } fields
- set.
+ @param info the result object the database returns. Typically has { ok : ..., errmsg : ... }
+ fields set.
@param options see enum QueryOptions - normally not needed to run a command
@param auth if set, the BSONObj representation will be appended to the command object sent
@@ -589,9 +597,10 @@ public:
/** Authorize access to a particular database.
Authentication is separate for each database on the server -- you may authenticate for any
number of databases on a single connection.
- The "admin" database is special and once authenticated provides access to all databases on the
- server.
- @param digestPassword if password is plain text, set this to true. otherwise assumed to be pre-digested
+ The "admin" database is special and once authenticated provides access to all databases on
+ the server.
+ @param digestPassword if password is plain text, set this to true. otherwise assumed
+ to be pre-digested
@param[out] authLevel level of authentication for the given user
@return true if successful
*/
@@ -633,8 +642,8 @@ public:
virtual bool isMaster(bool& isMaster, BSONObj* info = 0);
/**
- Create a new collection in the database. Normally, collection creation is automatic. You would
- use this function if you wish to specify special options on creation.
+ Create a new collection in the database. Normally, collection creation is automatic. You
+ would use this function if you wish to specify special options on creation.
If the collection already exists, no action occurs.
@@ -721,9 +730,9 @@ public:
return res;
}
- /** Perform a repair and compaction of the specified database. May take a long time to run. Disk space
- must be available equal to the size of the database while repairing.
- */
+ /** Perform a repair and compaction of the specified database. May take a long time to run.
+ * Disk space must be available equal to the size of the database while repairing.
+ */
bool repairDatabase(const std::string& dbname, BSONObj* info = 0) {
return simpleCommand(dbname, info, "repairDatabase");
}
@@ -733,8 +742,9 @@ public:
Generally, you should dropDatabase() first as otherwise the copied information will MERGE
into whatever data is already present in this database.
- For security reasons this function only works when you are authorized to access the "admin" db. However,
- if you have access to said db, you can copy any database from one place to another.
+ For security reasons this function only works when you are authorized to access the "admin"
+ db. However, if you have access to said db, you can copy any database from one place to
+ another.
TODO: this needs enhancement to be more flexible in terms of security.
This method provides a way to "rename" a database by copying it to a new db name and
@@ -753,15 +763,15 @@ public:
BSONObj* info = 0);
/** Run javascript code on the database server.
- dbname database SavedContext in which the code runs. The javascript variable 'db' will be assigned
- to this database when the function is invoked.
+ dbname database SavedContext in which the code runs. The javascript variable 'db' will be
+ assigned to this database when the function is invoked.
jscode source code for a javascript function.
- info the command object which contains any information on the invocation result including
- the return value and other information. If an error occurs running the jscode, error
- information will be in info. (try "log() << info.toString()")
+ info the command object which contains any information on the invocation result
+ including the return value and other information. If an error occurs running the
+ jscode, error information will be in info. (try "log() << info.toString()")
retValue return value from the jscode function.
- args args to pass to the jscode function. when invoked, the 'args' variable will be defined
- for use by the jscode.
+ args args to pass to the jscode function. when invoked, the 'args' variable will be
+ defined for use by the jscode.
returns true if runs ok.
@@ -796,7 +806,8 @@ public:
return eval(dbname, jscode, info, retValue, &args);
}
- /** eval invocation with one parm to server and one numeric field (either int or double) returned */
+ /** eval invocation with one parm to server and one numeric field (either int or double)
+ * returned */
template <class T, class NumType>
bool eval(const std::string& dbname, const std::string& jscode, T parm1, NumType& ret) {
BSONObj info;
@@ -836,7 +847,8 @@ public:
@param ns collection to be indexed
@param keys the "key pattern" for the index. e.g., { name : 1 }
@param unique if true, indicates that key uniqueness should be enforced for this index
- @param name if not specified, it will be created from the keys automatically (which is recommended)
+ @param name if not specified, it will be created from the keys automatically (which is
+ recommended)
@param background build index in the background (see mongodb docs for details)
@param v index version. leave at default value. (unit tests set this parameter.)
@param ttl. The value of how many seconds before data should be removed from a collection.
@@ -996,7 +1008,8 @@ public:
to specify a sort order.
@param nToReturn n to return (i.e., limit). 0 = unlimited
@param nToSkip start with the nth item
- @param fieldsToReturn optional template of which fields to select. if unspecified, returns all fields
+ @param fieldsToReturn optional template of which fields to select. if unspecified,
+ returns all fields
@param queryOptions see options enum at top of this file
@return cursor. 0 if error (connection failure)
@@ -1132,7 +1145,8 @@ public:
If autoReconnect is true, you can try to use the DBClientConnection even when
false was returned -- it will try to connect again.
- @param serverHostname host to connect to. can include port number ( 127.0.0.1 , 127.0.0.1:5555 )
+ @param serverHostname host to connect to. can include port number ( 127.0.0.1 ,
+ 127.0.0.1:5555 )
*/
void connect(const std::string& serverHostname) {
std::string errmsg;
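[The QueryOptions comments rewrapped above describe tailable and await-data cursors. A hedged sketch of the usual polling loop against the legacy driver API — connect() and the QueryOption flags are as declared in this header; the exact query() overload and cursor methods are assumptions about the era's driver:]

    mongo::DBClientConnection conn;
    conn.connect("127.0.0.1:27017");  // serverHostname may include a port

    auto c = conn.query("local.oplog.$main",
                        mongo::Query(),
                        0 /* nToReturn: unlimited */,
                        0 /* nToSkip */,
                        0 /* fieldsToReturn: all */,
                        mongo::QueryOption_CursorTailable | mongo::QueryOption_AwaitData);
    while (true) {
        if (!c->more()) {
            if (c->isDead())
                break;    // cursor invalidated (e.g. its last object was deleted): requery
            continue;     // AwaitData timed out with no new data: poll again
        }
        mongo::BSONObj obj = c->next();
        // ... consume obj ...
    }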
diff --git a/src/mongo/client/examples/mongoperf.cpp b/src/mongo/client/examples/mongoperf.cpp
index 1681e8a9113..56a06205485 100644
--- a/src/mongo/client/examples/mongoperf.cpp
+++ b/src/mongo/client/examples/mongoperf.cpp
@@ -69,7 +69,8 @@ unsigned long long len; // file len
const unsigned PG = 4096;
unsigned nThreadsRunning = 0;
-// as this is incremented A LOT, at some point this becomes a bottleneck if very high ops/second (in cache) things are happening.
+// as this is incremented A LOT, at some point this becomes a bottleneck if very high ops/second (in
+// cache) things are happening.
AtomicUInt32 iops;
SimpleMutex m;
@@ -192,8 +193,9 @@ void go() {
return;
}
lf = new LogFile(fname, true);
- const unsigned sz = 1024 * 1024 *
- 32; // needs to be big as we are using synchronousAppend. if we used a regular MongoFile it wouldn't have to be
+ // needs to be big as we are using synchronousAppend. if we used a regular MongoFile it
+ // wouldn't have to be
+ const unsigned sz = 1024 * 1024 * 32;
char* buf = (char*)mongoMalloc(sz + 4096);
const char* p = round(buf);
for (unsigned long long i = 0; i < len; i += sz) {
diff --git a/src/mongo/client/parallel.cpp b/src/mongo/client/parallel.cpp
index 939cd253e33..02af92a8812 100644
--- a/src/mongo/client/parallel.cpp
+++ b/src/mongo/client/parallel.cpp
@@ -765,14 +765,15 @@ void ParallelSortClusteredCursor::startInit() {
_qSpec.options(), // options
// NtoReturn is weird.
// If zero, it means use default size, so we do that for all cursors
- // If positive, it's the batch size (we don't want this cursor limiting results), that's
- // done at a higher level
- // If negative, it's the batch size, but we don't create a cursor - so we don't want
- // to create a child cursor either.
- // Either way, if non-zero, we want to pull back the batch size + the skip amount as
- // quickly as possible. Potentially, for a cursor on a single shard or if we keep better track of
- // chunks, we can actually add the skip value into the cursor and/or make some assumptions about the
- // return value size ( (batch size + skip amount) / num_servers ).
+ // If positive, it's the batch size (we don't want this cursor limiting
+ // results), that's done at a higher level
+ // If negative, it's the batch size, but we don't create a cursor - so we
+ // don't want to create a child cursor either.
+ // Either way, if non-zero, we want to pull back the batch size + the skip
+ // amount as quickly as possible. Potentially, for a cursor on a single
+ // shard or if we keep better track of chunks, we can actually add the skip
+ // value into the cursor and/or make some assumptions about the return value
+ // size ( (batch size + skip amount) / num_servers ).
_qSpec.ntoreturn() == 0 ? 0 : (_qSpec.ntoreturn() > 0
? _qSpec.ntoreturn() + _qSpec.ntoskip()
: _qSpec.ntoreturn() -
@@ -830,7 +831,8 @@ void ParallelSortClusteredCursor::startInit() {
<< (lazyInit ? "(lazily) " : "(full) ") << "on shard " << shardId
<< ", current connection state is " << mdata.toBSON() << endl;
} catch (StaleConfigException& e) {
- // Our version isn't compatible with the current version anymore on at least one shard, need to retry immediately
+ // Our version isn't compatible with the current version anymore on at least one shard,
+ // need to retry immediately
NamespaceString staleNS(e.getns());
// For legacy reasons, this may not be set in the exception :-(
@@ -1185,7 +1187,8 @@ void ParallelSortClusteredCursor::_oldInit() {
vector<shared_ptr<ShardConnection>> conns;
vector<string> servers;
- // Since we may get all sorts of errors, record them all as they come and throw them later if necessary
+ // Since we may get all sorts of errors, record them all as they come and throw them later if
+ // necessary
vector<string> staleConfigExs;
vector<string> socketExs;
vector<string> otherExs;
@@ -1194,7 +1197,8 @@ void ParallelSortClusteredCursor::_oldInit() {
int retries = -1;
// Loop through all the queries until we've finished or gotten a socket exception on all of them
- // We break early for non-socket exceptions, and socket exceptions if we aren't returning partial results
+ // We break early for non-socket exceptions, and socket exceptions if we aren't returning
+ // partial results
do {
retries++;
@@ -1224,7 +1228,8 @@ void ParallelSortClusteredCursor::_oldInit() {
const string errLoc = " @ " + serverHost;
if (firstPass) {
- // This may be the first time connecting to this shard, if so we can get an error here
+ // This may be the first time connecting to this shard, if so we can get an error
+ // here
try {
conns.push_back(shared_ptr<ShardConnection>(new ShardConnection(
uassertStatusOK(ConnectionString::parse(serverHost)), _ns)));
@@ -1288,10 +1293,10 @@ void ParallelSortClusteredCursor::_oldInit() {
}
}
- // Go through all the potentially started cursors and finish initializing them or log any errors and
- // potentially retry
- // TODO: Better error classification would make this easier, errors are indicated in all sorts of ways
- // here that we need to trap.
+ // Go through all the potentially started cursors and finish initializing them or log any
+ // errors and potentially retry
+ // TODO: Better error classification would make this easier, errors are indicated in all
+ // sorts of ways here that we need to trap.
for (size_t i = 0; i < num; i++) {
const string errLoc = " @ " + serverHosts[i];
@@ -1625,8 +1630,8 @@ bool Future::CommandResult::join(int maxRetries) {
}
}
- // We may not always have a collection, since we don't know from a generic command what collection
- // is supposed to be acted on, if any
+ // We may not always have a collection, since we don't know from a generic command what
+ // collection is supposed to be acted on, if any
if (nsGetCollection(staleNS).size() == 0) {
warning() << "no collection namespace in stale config exception "
<< "for lazy command " << _cmd << ", could not refresh " << staleNS
diff --git a/src/mongo/crypto/tom/tomcrypt_cfg.h b/src/mongo/crypto/tom/tomcrypt_cfg.h
index daae2890d67..c599bab88ca 100644
--- a/src/mongo/crypto/tom/tomcrypt_cfg.h
+++ b/src/mongo/crypto/tom/tomcrypt_cfg.h
@@ -64,11 +64,12 @@ LTC_EXPORT int LTC_CALL XSTRCMP(const char* s1, const char* s2);
#define ARGTYPE 0
#endif
-/* Controls endianess and size of registers. Leave uncommented to get platform neutral [slower] code
+/* Controls endianess and size of registers. Leave uncommented to get platform neutral [slower]
+ * code
*
- * Note: in order to use the optimized macros your platform must support unaligned 32 and 64 bit read/writes.
- * The x86 platforms allow this but some others [ARM for instance] do not. On those platforms you **MUST**
- * use the portable [slower] macros.
+ * Note: in order to use the optimized macros your platform must support unaligned 32 and 64 bit
+ * read/writes. The x86 platforms allow this but some others [ARM for instance] do not. On those
+ * platforms you **MUST** use the portable [slower] macros.
*/
/* detect x86-32 machines somewhat */
diff --git a/src/mongo/crypto/tom/tomcrypt_custom.h b/src/mongo/crypto/tom/tomcrypt_custom.h
index 07d64fc83f6..9d64e630010 100644
--- a/src/mongo/crypto/tom/tomcrypt_custom.h
+++ b/src/mongo/crypto/tom/tomcrypt_custom.h
@@ -410,7 +410,8 @@
/* Debuggers */
-/* define this if you use Valgrind, note: it CHANGES the way SOBER-128 and LTC_RC4 work (see the code) */
+/* define this if you use Valgrind, note: it CHANGES the way SOBER-128 and LTC_RC4 work (see the
+ * code) */
/* #define LTC_VALGRIND */
#endif
diff --git a/src/mongo/crypto/tom/tomcrypt_hash.h b/src/mongo/crypto/tom/tomcrypt_hash.h
index 7060353d4c2..db3cd46c152 100644
--- a/src/mongo/crypto/tom/tomcrypt_hash.h
+++ b/src/mongo/crypto/tom/tomcrypt_hash.h
@@ -199,7 +199,8 @@ extern struct ltc_hash_descriptor {
*/
int (*test)(void);
- /* accelerated hmac callback: if you need to-do multiple packets just use the generic hmac_memory and provide a hash callback */
+ /* accelerated hmac callback: if you need to-do multiple packets just use the generic
+ * hmac_memory and provide a hash callback */
int (*hmac_block)(const unsigned char* key,
unsigned long keylen,
const unsigned char* in,
diff --git a/src/mongo/db/auth/authorization_session.h b/src/mongo/db/auth/authorization_session.h
index d6fe06e11fa..16c10334011 100644
--- a/src/mongo/db/auth/authorization_session.h
+++ b/src/mongo/db/auth/authorization_session.h
@@ -185,7 +185,8 @@ public:
// ResourcePattern::forDatabaseName(role.getDB()), ActionType::grantAnyRole)
bool isAuthorizedToRevokeRole(const RoleName& role);
- // Utility function for isAuthorizedToChangeOwnPasswordAsUser and isAuthorizedToChangeOwnCustomDataAsUser
+ // Utility function for isAuthorizedToChangeOwnPasswordAsUser and
+ // isAuthorizedToChangeOwnCustomDataAsUser
bool isAuthorizedToChangeAsUser(const UserName& userName, ActionType actionType);
// Returns true if the current session is authenticated as the given user and that user
diff --git a/src/mongo/db/auth/role_graph_builtin_roles.cpp b/src/mongo/db/auth/role_graph_builtin_roles.cpp
index 6b8a1762bce..213aa690b4a 100644
--- a/src/mongo/db/auth/role_graph_builtin_roles.cpp
+++ b/src/mongo/db/auth/role_graph_builtin_roles.cpp
@@ -138,8 +138,8 @@ MONGO_INITIALIZER(AuthorizationBuiltinRoles)(InitializerContext* context) {
<< ActionType::createCollection // read_write gets this also
<< ActionType::dbStats // clusterMonitor gets this also
<< ActionType::dropCollection
- << ActionType::
- dropDatabase // clusterAdmin gets this also TODO(spencer): should readWriteAnyDatabase?
+ << ActionType::dropDatabase // clusterAdmin gets this also TODO(spencer): should
+ // readWriteAnyDatabase?
<< ActionType::dropIndex << ActionType::createIndex << ActionType::indexStats
<< ActionType::enableProfiler << ActionType::listCollections << ActionType::listIndexes
<< ActionType::planCacheIndexFilter << ActionType::planCacheRead
diff --git a/src/mongo/db/commands/index_filter_commands_test.cpp b/src/mongo/db/commands/index_filter_commands_test.cpp
index b23b3d34ed8..ba6f6c87e6c 100644
--- a/src/mongo/db/commands/index_filter_commands_test.cpp
+++ b/src/mongo/db/commands/index_filter_commands_test.cpp
@@ -157,7 +157,8 @@ bool planCacheContains(const PlanCache& planCache,
PlanCacheEntry* entry = *i;
// Canonicalizing query shape in cache entry to get cache key.
- // Alternatively, we could add key to PlanCacheEntry but that would be used in one place only.
+ // Alternatively, we could add key to PlanCacheEntry but that would be used in one place
+ // only.
ASSERT_OK(
CanonicalQuery::canonicalize(ns, entry->query, entry->sort, entry->projection, &cqRaw));
unique_ptr<CanonicalQuery> currentQuery(cqRaw);
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index 59eca8ae4c4..f32ebe3b8d8 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -781,7 +781,8 @@ void State::init() {
_scope->invoke(init, 0, 0, 0, true);
// js function to run reduce on all keys
- // redfunc = _scope->createFunction("for (var key in hashmap) { print('Key is ' + key); list = hashmap[key]; ret = reduce(key, list); print('Value is ' + ret); };");
+ // redfunc = _scope->createFunction("for (var key in hashmap) { print('Key is ' + key);
+ // list = hashmap[key]; ret = reduce(key, list); print('Value is ' + ret); };");
_reduceAll = _scope->createFunction(
"var map = _mrMap;"
"var list, ret;"
diff --git a/src/mongo/db/dbhelpers.h b/src/mongo/db/dbhelpers.h
index 146943fcfc3..a3e5f735afa 100644
--- a/src/mongo/db/dbhelpers.h
+++ b/src/mongo/db/dbhelpers.h
@@ -69,8 +69,8 @@ struct Helpers {
/* fetch a single object from collection ns that matches query.
set your db SavedContext first.
- @param query - the query to perform. note this is the low level portion of query so "orderby : ..."
- won't work.
+ @param query - the query to perform. note this is the low level portion of query so
+ "orderby : ..." won't work.
@param requireIndex if true, assert if no index for the query. a way to guard against
writing a slow query.
diff --git a/src/mongo/db/dbmessage.h b/src/mongo/db/dbmessage.h
index 10f57a17e09..795bdf75b90 100644
--- a/src/mongo/db/dbmessage.h
+++ b/src/mongo/db/dbmessage.h
@@ -74,7 +74,8 @@ namespace mongo {
std::string collection;
int nToSkip;
int nToReturn; // how many you want back as the beginning of the cursor data (0=no limit)
- // greater than zero is simply a hint on how many objects to send back per "cursor batch".
+ // greater than zero is simply a hint on how many objects to send back per
+ // "cursor batch".
// a negative number indicates a hard limit.
JSObject query;
[JSObject fieldsToReturn]
diff --git a/src/mongo/db/geo/s2.h b/src/mongo/db/geo/s2.h
index 7a3a1c6a840..293fe5185e4 100644
--- a/src/mongo/db/geo/s2.h
+++ b/src/mongo/db/geo/s2.h
@@ -29,8 +29,8 @@
#pragma once
/*
- * This file's purpose is to confine the suppression of the Clang warning for mismatched-tags (struct vs class)
- * in only the s2.h file
+ * This file's purpose is to confine the suppression of the Clang warning for
+ * mismatched-tags (struct vs class) in only the s2.h file
*/
#ifdef __clang__
diff --git a/src/mongo/db/matcher/expression_leaf.cpp b/src/mongo/db/matcher/expression_leaf.cpp
index 619fc64133d..8284ca8f3ab 100644
--- a/src/mongo/db/matcher/expression_leaf.cpp
+++ b/src/mongo/db/matcher/expression_leaf.cpp
@@ -275,7 +275,8 @@ Status RegexMatchExpression::init(StringData path, StringData regex, StringData
}
bool RegexMatchExpression::matchesSingleElement(const BSONElement& e) const {
- // log() << "RegexMatchExpression::matchesSingleElement _regex: " << _regex << " e: " << e << std::endl;
+ // log() << "RegexMatchExpression::matchesSingleElement _regex: " << _regex << " e: " << e <<
+ // std::endl;
switch (e.type()) {
case String:
case Symbol:
diff --git a/src/mongo/db/mongod_options.cpp b/src/mongo/db/mongod_options.cpp
index 27e929447a1..e3997f7d4a2 100644
--- a/src/mongo/db/mongod_options.cpp
+++ b/src/mongo/db/mongod_options.cpp
@@ -788,8 +788,8 @@ Status canonicalizeMongodOptions(moe::Environment* params) {
}
}
- // "storage.mmapv1.preallocDataFiles" comes from the config file, so override it if "noprealloc" is
- // set since that comes from the command line.
+ // "storage.mmapv1.preallocDataFiles" comes from the config file, so override it if "noprealloc"
+ // is set since that comes from the command line.
if (params->count("noprealloc")) {
Status ret = params->set("storage.mmapv1.preallocDataFiles",
moe::Value(!(*params)["noprealloc"].as<bool>()));
diff --git a/src/mongo/db/namespace_string.h b/src/mongo/db/namespace_string.h
index cd1067b7b45..881b9e1d468 100644
--- a/src/mongo/db/namespace_string.h
+++ b/src/mongo/db/namespace_string.h
@@ -149,7 +149,8 @@ public:
NamespaceString getTargetNSForListIndexesGetMore() const;
/**
- * @return true if the namespace is valid. Special namespaces for internal use are considered as valid.
+ * @return true if the namespace is valid. Special namespaces for internal use are considered as
+ * valid.
*/
bool isValid() const {
return validDBName(db()) && !coll().empty();
diff --git a/src/mongo/db/prefetch.cpp b/src/mongo/db/prefetch.cpp
index 77a44c4b834..732e230741e 100644
--- a/src/mongo/db/prefetch.cpp
+++ b/src/mongo/db/prefetch.cpp
@@ -53,8 +53,8 @@ using std::string;
namespace repl {
namespace {
-// todo / idea: the prefetcher, when it fetches _id, on an upsert, will see if the record exists. if it does not,
-// at write time, we can just do an insert, which will be faster.
+// todo / idea: the prefetcher, when it fetches _id, on an upsert, will see if the record exists. if
+// it does not, at write time, we can just do an insert, which will be faster.
// The count (of batches) and time spent fetching pages before application
// -- meaning depends on the prefetch behavior: all, _id index, none, etc.)
@@ -69,9 +69,9 @@ void prefetchIndexPages(OperationContext* txn,
Collection* collection,
const BackgroundSync::IndexPrefetchConfig& prefetchConfig,
const BSONObj& obj) {
- // do we want prefetchConfig to be (1) as-is, (2) for update ops only, or (3) configured per op type?
- // One might want PREFETCH_NONE for updates, but it's more rare that it is a bad idea for inserts.
- // #3 (per op), a big issue would be "too many knobs".
+ // do we want prefetchConfig to be (1) as-is, (2) for update ops only, or (3) configured per op
+ // type? One might want PREFETCH_NONE for updates, but it's more rare that it is a bad idea for
+ // inserts. #3 (per op), a big issue would be "too many knobs".
switch (prefetchConfig) {
case BackgroundSync::PREFETCH_NONE:
return;
diff --git a/src/mongo/db/query/get_executor.cpp b/src/mongo/db/query/get_executor.cpp
index 189910bbae1..1472d6693ae 100644
--- a/src/mongo/db/query/get_executor.cpp
+++ b/src/mongo/db/query/get_executor.cpp
@@ -1132,8 +1132,8 @@ std::string getProjectedDottedField(const std::string& field, bool* isIDOut) {
// Generate prefix of field up to (but not including) array index.
std::vector<std::string> prefixStrings(res);
prefixStrings.resize(i);
- // Reset projectedField. Instead of overwriting, joinStringDelim() appends joined string
- // to the end of projectedField.
+ // Reset projectedField. Instead of overwriting, joinStringDelim() appends joined
+ // string to the end of projectedField.
std::string projectedField;
mongo::joinStringDelim(prefixStrings, &projectedField, '.');
return projectedField;
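[The clarified comment above hinges on joinStringDelim appending to its output string rather than overwriting it, which is why projectedField is declared fresh. A stand-in with that behavior — a hypothetical reimplementation, not the util/text.h original:]

    #include <string>
    #include <vector>

    // Appends the joined string to *output -- mirrors the behavior the comment
    // above depends on.
    void joinStringDelim(const std::vector<std::string>& parts, std::string* output, char delim) {
        for (std::vector<std::string>::size_type i = 0; i < parts.size(); ++i) {
            if (i > 0)
                *output += delim;
            *output += parts[i];
        }
    }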
diff --git a/src/mongo/db/query/planner_access.h b/src/mongo/db/query/planner_access.h
index 55a05ff5161..8ab6bf9d58a 100644
--- a/src/mongo/db/query/planner_access.h
+++ b/src/mongo/db/query/planner_access.h
@@ -218,8 +218,8 @@ public:
// a filter on the entire tree.
// 2. No fetches performed. There will be a final fetch by the caller of buildIndexedDataAccess
// who set the value of inArrayOperator to true.
- // 3. No compound indices are used and no bounds are combined. These are incorrect in the context
- // of these operators.
+ // 3. No compound indices are used and no bounds are combined. These are
+ // incorrect in the context of these operators.
//
/**
diff --git a/src/mongo/db/query/query_planner_geo_test.cpp b/src/mongo/db/query/query_planner_geo_test.cpp
index 11fc175d2ac..7129f01af73 100644
--- a/src/mongo/db/query/query_planner_geo_test.cpp
+++ b/src/mongo/db/query/query_planner_geo_test.cpp
@@ -601,15 +601,16 @@ TEST_F(QueryPlannerTest, CompoundGeoNoGeoPredicateMultikey) {
/*
TEST_F(QueryPlannerTest, SortOnGeoQuery) {
addIndex(BSON("timestamp" << -1 << "position" << "2dsphere"));
- BSONObj query = fromjson("{position: {$geoWithin: {$geometry: {type: \"Polygon\", coordinates: [[[1, 1], [1, 90], [180, 90], [180, 1], [1, 1]]]}}}}");
- BSONObj sort = fromjson("{timestamp: -1}");
+ BSONObj query = fromjson("{position: {$geoWithin: {$geometry: {type: \"Polygon\", coordinates:
+ [[[1, 1], [1, 90], [180, 90], [180, 1], [1, 1]]]}}}}"); BSONObj sort = fromjson("{timestamp:
+ -1}");
runQuerySortProj(query, sort, BSONObj());
ASSERT_EQUALS(getNumSolutions(), 2U);
assertSolutionExists("{sort: {pattern: {timestamp: -1}, limit: 0, "
"node: {cscan: {dir: 1}}}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {timestamp: -1, position: '2dsphere'}}}}}");
-}
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {timestamp: -1, position:
+ '2dsphere'}}}}}"); }
TEST_F(QueryPlannerTest, SortOnGeoQueryMultikey) {
// true means multikey
diff --git a/src/mongo/db/repl/bgsync.cpp b/src/mongo/db/repl/bgsync.cpp
index 5c84a724b94..81184fe42ad 100644
--- a/src/mongo/db/repl/bgsync.cpp
+++ b/src/mongo/db/repl/bgsync.cpp
@@ -421,7 +421,8 @@ bool BackgroundSync::_rollbackIfNeeded(OperationContext* txn, OplogReader& r) {
return true;
}
- /* we're not ahead? maybe our new query got fresher data. best to come back and try again */
+ /* we're not ahead? maybe our new query got fresher data. best to come back and try
+ again */
log() << "syncTail condition 1";
sleepsecs(1);
} catch (DBException& e) {
diff --git a/src/mongo/db/repl/master_slave.cpp b/src/mongo/db/repl/master_slave.cpp
index 1e1bd428d39..074a7fcbe77 100644
--- a/src/mongo/db/repl/master_slave.cpp
+++ b/src/mongo/db/repl/master_slave.cpp
@@ -28,10 +28,12 @@
/* Collections we use:
- local.sources - indicates what sources we pull from as a "slave", and the last update of each
+ local.sources - indicates what sources we pull from as a "slave", and the last update of
+ each
local.oplog.$main - our op log as "master"
local.dbinfo.<dbname> - no longer used???
- local.pair.startup - [deprecated] can contain a special value indicating for a pair that we have the master copy.
+ local.pair.startup - [deprecated] can contain a special value indicating for a pair that we
+ have the master copy.
used when replacing other half of the pair which has permanently failed.
local.pair.sync - [deprecated] { initialsynccomplete: 1 }
*/
@@ -736,7 +738,8 @@ void ReplSource::_sync_pullOpLog_applyOperation(OperationContext* txn,
unique_ptr<Lock::GlobalWrite> lk(alreadyLocked ? 0 : new Lock::GlobalWrite(txn->lockState()));
if (replAllDead) {
- // hmmm why is this check here and not at top of this function? does it get set between top and here?
+ // hmmm why is this check here and not at top of this function? does it get set between top
+ // and here?
log() << "replAllDead, throwing SyncException: " << replAllDead << endl;
throw SyncException();
}
@@ -993,7 +996,8 @@ int ReplSource::_sync_pullOpLog(OperationContext* txn, int& nApplied) {
verify(syncedTo < nextOpTime);
throw SyncException();
} else {
- /* t == syncedTo, so the first op was applied previously or it is the first op of initial query and need not be applied. */
+ /* t == syncedTo, so the first op was applied previously or it is the first op of
+ * initial query and need not be applied. */
}
}
@@ -1115,7 +1119,8 @@ int ReplSource::sync(OperationContext* txn, int& nApplied) {
}
nClonedThisPass = 0;
- // FIXME Handle cases where this db isn't on default port, or default port is spec'd in hostName.
+ // FIXME Handle cases where this db isn't on default port, or default port is spec'd in
+ // hostName.
if ((string("localhost") == hostName || string("127.0.0.1") == hostName) &&
serverGlobalParams.port == ServerGlobalParams::DefaultDBPort) {
log() << "can't sync from self (localhost). sources configuration may be wrong." << endl;
@@ -1223,9 +1228,9 @@ static void replMain(OperationContext* txn) {
break;
}
}
- verify(
- syncing ==
- 0); // i.e., there is only one sync thread running. we will want to change/fix this.
+
+ // i.e., there is only one sync thread running. we will want to change/fix this.
+ verify(syncing == 0);
syncing++;
}
@@ -1398,7 +1403,8 @@ void pretouchN(vector<BSONObj>& v, unsigned a, unsigned b) {
void pretouchOperation(OperationContext* txn, const BSONObj& op) {
if (txn->lockState()->isWriteLocked()) {
- return; // no point pretouching if write locked. not sure if this will ever fire, but just in case.
+ // no point pretouching if write locked. not sure if this will ever fire, but just in case.
+ return;
}
const char* which = "o";
diff --git a/src/mongo/db/repl/master_slave.h b/src/mongo/db/repl/master_slave.h
index d290be23de3..e26fd3c0f56 100644
--- a/src/mongo/db/repl/master_slave.h
+++ b/src/mongo/db/repl/master_slave.h
@@ -34,7 +34,8 @@
/* replication data overview
at the slave:
- local.sources { host: ..., source: ..., only: ..., syncedTo: ..., localLogTs: ..., dbsNextPass: { ... }, incompleteCloneDbs: { ... } }
+ local.sources { host: ..., source: ..., only: ..., syncedTo: ..., localLogTs: ...,
+ dbsNextPass: { ... }, incompleteCloneDbs: { ... } }
at the master:
local.oplog.$<source>
@@ -68,7 +69,8 @@ public:
Can be a group of things to replicate for several databases.
- { host: ..., source: ..., only: ..., syncedTo: ..., dbsNextPass: { ... }, incompleteCloneDbs: { ... } }
+ { host: ..., source: ..., only: ..., syncedTo: ..., dbsNextPass: { ... },
+ incompleteCloneDbs: { ... } }
'source' defaults to 'main'; support for multiple source names is
not done (always use main for now).
@@ -135,8 +137,10 @@ public:
std::string sourceName() const {
return _sourceName.empty() ? "main" : _sourceName;
}
- std::string
- only; // only a certain db. note that in the sources collection, this may not be changed once you start replicating.
+
+ // only a certain db. note that in the sources collection, this may not be changed once you
+ // start replicating.
+ std::string only;
/* the last time point we have already synced up to (in the remote/master's oplog). */
Timestamp syncedTo;
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index 2afa1b53c52..26ab199fae2 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -687,8 +687,8 @@ Status applyOperation_inlock(OperationContext* txn,
return Status(ErrorCodes::OperationFailed, msg);
}
- // Otherwise, it's present; zero objects were updated because of additional specifiers
- // in the query for idempotence
+ // Otherwise, it's present; zero objects were updated because of additional
+ // specifiers in the query for idempotence
} else {
// this could happen benignly on an oplog duplicate replay of an upsert
// (because we are idempotent),
diff --git a/src/mongo/db/repl/repl_settings.h b/src/mongo/db/repl/repl_settings.h
index 5c1e6032acc..1ce5fa6b4dc 100644
--- a/src/mongo/db/repl/repl_settings.h
+++ b/src/mongo/db/repl/repl_settings.h
@@ -51,7 +51,10 @@ class ReplSettings {
public:
SlaveTypes slave;
- /** true means we are master and doing replication. if we are not writing to oplog, this won't be true. */
+ /**
+ * true means we are master and doing replication. if we are not writing to oplog, this won't
+ * be true.
+ */
bool master;
bool fastsync;
diff --git a/src/mongo/db/repl/replication_info.cpp b/src/mongo/db/repl/replication_info.cpp
index 57b403aa434..f5d0086c1ef 100644
--- a/src/mongo/db/repl/replication_info.cpp
+++ b/src/mongo/db/repl/replication_info.cpp
@@ -113,7 +113,8 @@ void appendReplicationInfo(OperationContext* txn, BSONObjBuilder& result, int le
if (level > 1) {
wassert(!txn->lockState()->isLocked());
- // note: there is no so-style timeout on this connection; perhaps we should have one.
+ // note: there is no so-style timeout on this connection; perhaps we should have
+ // one.
ScopedDbConnection conn(s["host"].valuestr());
DBClientConnection* cliConn = dynamic_cast<DBClientConnection*>(&conn.conn());
diff --git a/src/mongo/db/storage/mmap_v1/aligned_builder.cpp b/src/mongo/db/storage/mmap_v1/aligned_builder.cpp
index 8742f25e285..bee3fb4f86a 100644
--- a/src/mongo/db/storage/mmap_v1/aligned_builder.cpp
+++ b/src/mongo/db/storage/mmap_v1/aligned_builder.cpp
@@ -134,8 +134,8 @@ void AlignedBuilder::_malloc(unsigned sz) {
_p._allocationAddress = p;
_p._data = (char*)p;
#elif defined(__linux__)
- // in theory #ifdef _POSIX_VERSION should work, but it doesn't on OS X 10.4, and needs to be tested on solaris.
- // so for now, linux only for this.
+ // in theory #ifdef _POSIX_VERSION should work, but it doesn't on OS X 10.4, and needs to be
+ // tested on solaris. so for now, linux only for this.
void* p = 0;
int res = posix_memalign(&p, Alignment, sz);
massert(13524, "out of memory AlignedBuilder", res == 0);
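
The branch above reduces to the standard POSIX idiom; a self-contained sketch follows. The 8192/64KB numbers are arbitrary, and the real code massert()s on failure instead of returning null.

    #include <cassert>
    #include <cstdint>
    #include <cstdlib>

    // posix_memalign requires a power-of-two alignment that is also a
    // multiple of sizeof(void*).
    void* allocAligned(std::size_t alignment, std::size_t sz) {
        void* p = nullptr;
        return posix_memalign(&p, alignment, sz) == 0 ? p : nullptr;
    }

    int main() {
        void* p = allocAligned(8192, 64 * 1024);
        assert(p != nullptr);
        assert(reinterpret_cast<std::uintptr_t>(p) % 8192 == 0);
        std::free(p);
    }
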
diff --git a/src/mongo/db/storage/mmap_v1/btree/btree_logic_test.cpp b/src/mongo/db/storage/mmap_v1/btree/btree_logic_test.cpp
index b4e42196c99..da0d26a5cbf 100644
--- a/src/mongo/db/storage/mmap_v1/btree/btree_logic_test.cpp
+++ b/src/mongo/db/storage/mmap_v1/btree/btree_logic_test.cpp
@@ -2382,9 +2382,11 @@ public:
}
// too much work to try to make this happen through inserts and deletes
// we are intentionally manipulating the btree bucket directly here
- BtreeBucket::Loc* L = const_cast< BtreeBucket::Loc* >( &bt()->keyNode( 1 ).prevChildBucket );
+ BtreeBucket::Loc* L = const_cast< BtreeBucket::Loc* >(
+ &bt()->keyNode( 1 ).prevChildBucket );
writing(L)->Null();
- writingInt( const_cast< BtreeBucket::Loc& >( bt()->keyNode( 1 ).recordLoc ).GETOFS() ) |= 1; // make unused
+ writingInt( const_cast< BtreeBucket::Loc& >(
+ bt()->keyNode( 1 ).recordLoc ).GETOFS() ) |= 1; // make unused
BSONObj k = BSON( "a" << toInsert );
Base::insert( k );
}
diff --git a/src/mongo/db/storage/mmap_v1/btree/key.cpp b/src/mongo/db/storage/mmap_v1/btree/key.cpp
index cbb89d8fab9..2e78e5e008f 100644
--- a/src/mongo/db/storage/mmap_v1/btree/key.cpp
+++ b/src/mongo/db/storage/mmap_v1/btree/key.cpp
@@ -201,7 +201,8 @@ int KeyBson::woCompare(const KeyBson& r, const Ordering& o) const {
return oldCompare(_o, r._o, o);
}
-// woEqual could be made faster than woCompare but this is for backward compatibility so not worth a big effort
+// woEqual could be made faster than woCompare but this is for backward compatibility so not worth a
+// big effort
bool KeyBson::woEqual(const KeyBson& r) const {
return oldCompare(_o, r._o, nullOrdering) == 0;
}
@@ -499,7 +500,8 @@ static int compare(const unsigned char*& l, const unsigned char*& r) {
int llen = binDataCodeToLength(L);
int diff = L - R; // checks length and subtype simultaneously
if (diff) {
- // unfortunately nibbles are backwards to do subtype and len in one check (could bit swap...)
+ // unfortunately nibbles are backwards to do subtype and len in one check (could bit
+ // swap...)
int rlen = binDataCodeToLength(R);
if (llen != rlen)
                return llen - rlen;
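
To illustrate the "backwards nibbles" remark with a hypothetical layout (the real encoding lives in binDataCodeToLength and may differ): suppose the byte held the subtype in the high nibble and a length code in the low nibble. Raw byte subtraction would then order subtype-major, while the desired order is length-major, so on a mismatch the lengths must be decoded and compared first.

    #include <cassert>

    int lengthCode(unsigned char b) { return b & 0x0f; }  // assumed low nibble
    int subtypeOf(unsigned char b)  { return b >> 4; }    // assumed high nibble

    int compareCodes(unsigned char l, unsigned char r) {
        if (l == r)
            return 0;  // one comparison checks length and subtype at once
        if (lengthCode(l) != lengthCode(r))
            return lengthCode(l) - lengthCode(r);  // length-major ordering
        return subtypeOf(l) - subtypeOf(r);
    }

    int main() {
        // 0x21 (subtype 2, len 1) must sort before 0x12 (subtype 1, len 2),
        // even though raw subtraction (0x21 - 0x12 > 0) says otherwise.
        assert(compareCodes(0x21, 0x12) < 0);
    }
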
diff --git a/src/mongo/db/storage/mmap_v1/btree/key.h b/src/mongo/db/storage/mmap_v1/btree/key.h
index 4787d83281a..906ddcc621b 100644
--- a/src/mongo/db/storage/mmap_v1/btree/key.h
+++ b/src/mongo/db/storage/mmap_v1/btree/key.h
@@ -81,10 +81,11 @@ class KeyV1Owned;
// corresponding to BtreeData_V1
class KeyV1 {
- void operator=(
- const KeyV1&); // disallowed just to make people be careful as we don't own the buffer
- KeyV1(
- const KeyV1Owned&); // disallowed as this is not a great idea as KeyV1Owned likely will go out of scope
+ // disallowed just to make people be careful as we don't own the buffer
+ void operator=(const KeyV1&);
+ // disallowed as this is not a great idea as KeyV1Owned likely will go out of scope
+ KeyV1(const KeyV1Owned&);
+
public:
KeyV1() {
_keyData = 0;
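
An aside: in C++11 and later the same "disallowed" intent is usually spelled with = delete, which turns misuse into a compile error rather than a private-member link error. A sketch, not a proposed change to this header:

    class KeyV1Owned;

    class KeyV1Sketch {
    public:
        KeyV1Sketch() = default;
        // non-owning view of a buffer: assignment is disallowed so callers
        // must think about who owns the bytes
        KeyV1Sketch& operator=(const KeyV1Sketch&) = delete;
        // constructing from an owned key is disallowed because the owner is
        // likely to go out of scope while this view is still alive
        KeyV1Sketch(const KeyV1Owned&) = delete;

    private:
        const char* _keyData = nullptr;
    };
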
diff --git a/src/mongo/db/storage/mmap_v1/catalog/namespace.h b/src/mongo/db/storage/mmap_v1/catalog/namespace.h
index f93112de47f..0e382beade2 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/namespace.h
+++ b/src/mongo/db/storage/mmap_v1/catalog/namespace.h
@@ -84,12 +84,12 @@ public:
return buf;
}
- /* NamespaceDetails::Extra was added after fact to allow chaining of data blocks to support more than 10 indexes
- (more than 10 IndexDetails). It's a bit hacky because of this late addition with backward
- file support. */
+    /* NamespaceDetails::Extra was added after the fact to allow chaining of data blocks to
+     * support more than 10 indexes (more than 10 IndexDetails). It's a bit hacky because of this
+     * late addition with backward file support. */
std::string extraName(int i) const;
- bool isExtra()
- const; /* ends with $extr... -- when true an extra block not a normal NamespaceDetails block */
+    /* ends with $extr... -- when true this is an extra block, not a normal NamespaceDetails
+     * block */
+ bool isExtra() const;
enum MaxNsLenValue {
// Maximum possible length of name any namespace, including special ones like $extra.
diff --git a/src/mongo/db/storage/mmap_v1/catalog/namespace_details.h b/src/mongo/db/storage/mmap_v1/catalog/namespace_details.h
index 5002bf267c7..a6604e1fb04 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/namespace_details.h
+++ b/src/mongo/db/storage/mmap_v1/catalog/namespace_details.h
@@ -97,8 +97,8 @@ public:
DiskLoc capExtent; // the "current" extent we're writing too for a capped collection
DiskLoc capFirstNewRecord;
- unsigned short
- _dataFileVersion; // NamespaceDetails version. So we can do backward compatibility in the future. See filever.h
+ // NamespaceDetails version. So we can do backward compatibility in the future. See filever.h
+ unsigned short _dataFileVersion;
unsigned short _indexFileVersion;
unsigned long long multiKeyIndexBits;
@@ -115,8 +115,8 @@ public:
DiskLoc deletedListLarge[LargeBuckets];
// Think carefully before using this. We need at least 8 bytes reserved to leave room for a
- // DiskLoc pointing to more data (eg in a dummy MmapV1RecordHeader or Extent). There is still _reservedA
- // above, but these are the final two reserved 8-byte regions.
+ // DiskLoc pointing to more data (eg in a dummy MmapV1RecordHeader or Extent). There is still
+ // _reservedA above, but these are the final two reserved 8-byte regions.
char _reserved[8];
/*-------- end data 496 bytes */
public:
diff --git a/src/mongo/db/storage/mmap_v1/data_file.h b/src/mongo/db/storage/mmap_v1/data_file.h
index ed6e08e7931..a9252188d43 100644
--- a/src/mongo/db/storage/mmap_v1/data_file.h
+++ b/src/mongo/db/storage/mmap_v1/data_file.h
@@ -126,8 +126,10 @@ class DataFileHeader {
public:
DataFileVersion version;
int fileLength;
- DiskLoc
- unused; /* unused is the portion of the file that doesn't belong to any allocated extents. -1 = no more */
+ /**
+ * unused is the portion of the file that doesn't belong to any allocated extents. -1 = no more
+ */
+ DiskLoc unused;
int unusedLength;
DiskLoc freeListStart;
DiskLoc freeListEnd;
diff --git a/src/mongo/db/storage/mmap_v1/diskloc.h b/src/mongo/db/storage/mmap_v1/diskloc.h
index 662daf074d5..5a675b40b92 100644
--- a/src/mongo/db/storage/mmap_v1/diskloc.h
+++ b/src/mongo/db/storage/mmap_v1/diskloc.h
@@ -52,12 +52,15 @@ class BtreeBucket;
(such as adding a virtual function)
*/
class DiskLoc {
- int _a; // this will be volume, file #, etc. but is a logical value could be anything depending on storage engine
+    // this will be volume, file #, etc. but is a logical value that could be anything depending
+    // on the storage engine
+ int _a;
int ofs;
public:
enum SentinelValues {
- /* note NullOfs is different. todo clean up. see refs to NullOfs in code - use is valid but outside DiskLoc context so confusing as-is. */
+ /* note NullOfs is different. todo clean up. see refs to NullOfs in code - use is valid but
+ * outside DiskLoc context so confusing as-is. */
NullOfs = -1,
// Caps the number of files that may be allocated in a database, allowing about 32TB of
@@ -74,15 +77,16 @@ public:
Null();
}
- // Minimum allowed DiskLoc. No MmapV1RecordHeader may begin at this location because file and extent
- // headers must precede Records in a file.
+ // Minimum allowed DiskLoc. No MmapV1RecordHeader may begin at this location because file and
+ // extent headers must precede Records in a file.
static DiskLoc min() {
return DiskLoc(0, 0);
}
// Maximum allowed DiskLoc.
- // No MmapV1RecordHeader may begin at this location because the minimum size of a MmapV1RecordHeader is larger than
- // one byte. Also, the last bit is not able to be used because mmapv1 uses that for "used".
+ // No MmapV1RecordHeader may begin at this location because the minimum size of a
+    // MmapV1RecordHeader is larger than one byte. Also, the last bit cannot be used because
+    // mmapv1 uses that for "used".
static DiskLoc max() {
return DiskLoc(0x7fffffff, 0x7ffffffe);
}
@@ -96,8 +100,9 @@ public:
}
DiskLoc& Null() {
_a = -1;
- ofs =
- 0; /* note NullOfs is different. todo clean up. see refs to NullOfs in code - use is valid but outside DiskLoc context so confusing as-is. */
+ /* note NullOfs is different. todo clean up. see refs to NullOfs in code - use is valid but
+ * outside DiskLoc context so confusing as-is. */
+ ofs = 0;
return *this;
}
void assertOk() const {
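
Pulling the comments above together, a toy two-piece location with the documented sentinels; this mirrors the shape, not the full class:

    #include <cassert>

    struct DiskLocSketch {
        int a;    // logical file number -- meaning is storage-engine defined
        int ofs;  // byte offset within that file

        bool isNull() const { return a == -1; }
        void setNull() { a = -1; ofs = 0; }
    };

    int main() {
        DiskLocSketch lo{0, 0};                    // min(): headers precede records
        DiskLocSketch hi{0x7fffffff, 0x7ffffffe};  // max(): last bit reserved ("used")
        assert(!lo.isNull() && !hi.isNull());
        DiskLocSketch d{3, 4096};
        d.setNull();
        assert(d.isNull());
    }
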
diff --git a/src/mongo/db/storage/mmap_v1/dur.cpp b/src/mongo/db/storage/mmap_v1/dur.cpp
index 21c729eea17..a17a7a80d51 100644
--- a/src/mongo/db/storage/mmap_v1/dur.cpp
+++ b/src/mongo/db/storage/mmap_v1/dur.cpp
@@ -34,19 +34,21 @@
we could be in read lock for this
for very large objects write directly to redo log in situ?
WRITETOJOURNAL
- we could be unlocked (the main db lock that is...) for this, with sufficient care, but there is some complexity
- have to handle falling behind which would use too much ram (going back into a read lock would suffice to stop that).
- for now (1.7.5/1.8.0) we are in read lock which is not ideal.
+      we could be unlocked (the main db lock that is...) for this, with sufficient care, but there
+      is some complexity: we have to handle falling behind, which would use too much ram (going
+      back into a read lock would suffice to stop that). for now (1.7.5/1.8.0) we are in read lock
+      which is not ideal.
WRITETODATAFILES
- actually write to the database data files in this phase. currently done by memcpy'ing the writes back to
- the non-private MMF. alternatively one could write to the files the traditional way; however the way our
- storage engine works that isn't any faster (actually measured a tiny bit slower).
+ actually write to the database data files in this phase. currently done by memcpy'ing the
+ writes back to the non-private MMF. alternatively one could write to the files the
+ traditional way; however the way our storage engine works that isn't any faster (actually
+ measured a tiny bit slower).
REMAPPRIVATEVIEW
- we could in a write lock quickly flip readers back to the main view, then stay in read lock and do our real
- remapping. with many files (e.g., 1000), remapping could be time consuming (several ms), so we don't want
- to be too frequent.
- there could be a slow down immediately after remapping as fresh copy-on-writes for commonly written pages will
- be required. so doing these remaps fractionally is helpful.
+ we could in a write lock quickly flip readers back to the main view, then stay in read lock
+ and do our real remapping. with many files (e.g., 1000), remapping could be time consuming
+      (several ms), so we don't want to be too frequent. there could be a slowdown immediately
+      after remapping as fresh copy-on-writes for commonly written pages will be required, so
+      doing these remaps fractionally is helpful.
mutexes:
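
As a reading aid, the phase list above boils down to a fixed pipeline per group commit. A skeletal sketch follows; the function names mirror the phase names and the bodies are placeholders, not the real implementation:

    // Skeleton of one group-commit cycle as described in the comment.
    void prepLogBuffer()    { /* gather write intents into an output buffer */ }
    void writeToJournal()   { /* append the buffer to the journal and fsync */ }
    void writeToDataFiles() { /* memcpy the journaled writes back to the
                                 non-private mapped views */ }
    void remapPrivateView() { /* re-map a fraction of the private views so
                                 copy-on-write pages are reclaimed */ }

    void commitCycleSketch() {
        prepLogBuffer();
        writeToJournal();    // the commit is durable once this returns
        writeToDataFiles();
        remapPrivateView();  // done fractionally to spread the cost
    }
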
diff --git a/src/mongo/db/storage/mmap_v1/dur_journalformat.h b/src/mongo/db/storage/mmap_v1/dur_journalformat.h
index 3c31c2686dd..964c0b79b9b 100644
--- a/src/mongo/db/storage/mmap_v1/dur_journalformat.h
+++ b/src/mongo/db/storage/mmap_v1/dur_journalformat.h
@@ -49,11 +49,12 @@ struct JHeader {
JHeader() {}
JHeader(std::string fname);
- char magic
- [2]; // "j\n". j means journal, then a linefeed, fwiw if you were to run "less" on the file or something...
+ // "j\n". j means journal, then a linefeed, fwiw if you were to run "less" on the file or
+ // something...
+ char magic[2];
-// x4142 is asci--readable if you look at the file with head/less -- thus the starting values were near
-// that. simply incrementing the version # is safe on a fwd basis.
+// x4142 is ascii-readable if you look at the file with head/less -- thus the starting values were
+// near that. simply incrementing the version # is safe on a fwd basis.
#if defined(_NOCOMPRESS)
enum { CurrentVersion = 0x4148 };
#else
@@ -62,15 +63,15 @@ struct JHeader {
unsigned short _version;
// these are just for diagnostic ease (make header more useful as plain text)
- char n1; // '\n'
- char ts[20]; // ascii timestamp of file generation. for user reading, not used by code.
- char n2; // '\n'
- char dbpath
- [128]; // path/filename of this file for human reading and diagnostics. not used by code.
- char n3, n4; // '\n', '\n'
+ char n1; // '\n'
+ char ts[20]; // ascii timestamp of file generation. for user reading, not used by code.
+ char n2; // '\n'
+ char dbpath[128]; // path/filename of this file for human reading and diagnostics. not used
+ // by code.
+ char n3, n4; // '\n', '\n'
- unsigned long long
- fileId; // unique identifier that will be in each JSectHeader. important as we recycle prealloced files
+ unsigned long long fileId; // unique identifier that will be in each JSectHeader.
+ // important as we recycle prealloced files
char reserved3[8026]; // 8KB total for the file header
char txt2[2]; // "\n\n" at the end
@@ -112,7 +113,8 @@ public:
};
/** an individual write operation within a group commit section. Either the entire section should
- be applied, or nothing. (We check the md5 for the whole section before doing anything on recovery.)
+ be applied, or nothing. (We check the md5 for the whole section before doing anything on
+ recovery.)
*/
struct JEntry {
enum OpCodes {
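
The all-or-nothing rule above can be made concrete with a toy recovery step: hash the whole section first, and only then apply its entries. The checksum below is a stand-in for the md5 the format actually stores.

    #include <cstdint>
    #include <vector>

    struct EntrySketch { uint8_t byte; };  // stands in for one journaled write

    struct SectionSketch {
        std::vector<EntrySketch> entries;
        uint64_t storedChecksum;
    };

    uint64_t toyChecksum(const SectionSketch& s) {  // not md5; illustration only
        uint64_t h = 0;
        for (const EntrySketch& e : s.entries)
            h = h * 131 + e.byte;
        return h;
    }

    bool applySectionSketch(const SectionSketch& s, std::vector<uint8_t>& out) {
        if (toyChecksum(s) != s.storedChecksum)
            return false;                       // torn or corrupt: apply nothing
        for (const EntrySketch& e : s.entries)  // apply only after the check
            out.push_back(e.byte);
        return true;
    }
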
diff --git a/src/mongo/db/storage/mmap_v1/dur_journalimpl.h b/src/mongo/db/storage/mmap_v1/dur_journalimpl.h
index 86a2d19de97..77e79ccb8d1 100644
--- a/src/mongo/db/storage/mmap_v1/dur_journalimpl.h
+++ b/src/mongo/db/storage/mmap_v1/dur_journalimpl.h
@@ -108,8 +108,8 @@ private:
static void preFlush();
static void postFlush();
unsigned long long _preFlushTime;
- unsigned long long
- _lastFlushTime; // data < this time is fsynced in the datafiles (unless hard drive controller is caching)
+ // data < this time is fsynced in the datafiles (unless hard drive controller is caching)
+ unsigned long long _lastFlushTime;
bool _writeToLSNNeeded;
void updateLSNFile();
};
diff --git a/src/mongo/db/storage/mmap_v1/dur_preplogbuffer.cpp b/src/mongo/db/storage/mmap_v1/dur_preplogbuffer.cpp
index dc9d7fb2b7a..10651cc1ae8 100644
--- a/src/mongo/db/storage/mmap_v1/dur_preplogbuffer.cpp
+++ b/src/mongo/db/storage/mmap_v1/dur_preplogbuffer.cpp
@@ -68,7 +68,10 @@ static DurableMappedFile* findMMF_inlock(void* ptr, size_t& ofs) {
DurableMappedFile* f = privateViews.find_inlock(ptr, ofs);
if (f == 0) {
error() << "findMMF_inlock failed " << privateViews.numberOfViews_inlock() << endl;
- printStackTrace(); // we want a stack trace and the assert below didn't print a trace once in the real world - not sure why
+
+ // we want a stack trace and the assert below didn't print a trace once in the real world
+ // - not sure why
+ printStackTrace();
stringstream ss;
ss << "view pointer cannot be resolved " << std::hex << (size_t)ptr;
journalingFailure(ss.str().c_str()); // asserts, which then abends
diff --git a/src/mongo/db/storage/mmap_v1/dur_recover.cpp b/src/mongo/db/storage/mmap_v1/dur_recover.cpp
index a6958ad1aec..209acc92cea 100644
--- a/src/mongo/db/storage/mmap_v1/dur_recover.cpp
+++ b/src/mongo/db/storage/mmap_v1/dur_recover.cpp
@@ -207,7 +207,8 @@ public:
_entries->skip(len + 1); // skip '\0' too
_entries->read(lenOrOpCode); // read this for the fall through
}
- // fall through as a basic operation always follows jdbcontext, and we don't have anything to return yet
+ // fall through as a basic operation always follows jdbcontext, and we don't have
+ // anything to return yet
default:
// fall through
@@ -517,7 +518,8 @@ bool RecoveryJob::processFile(boost::filesystem::path journalfile) {
return true;
}
} catch (...) {
- // if something weird like a permissions problem keep going so the massert down below can happen (presumably)
+        // if something weird happens, like a permissions problem, keep going so the massert down
+        // below can happen (presumably)
log() << "recover exception checking filesize" << endl;
}
diff --git a/src/mongo/db/storage/mmap_v1/dur_stats.h b/src/mongo/db/storage/mmap_v1/dur_stats.h
index 8ec6f8c024f..0b3daf7f021 100644
--- a/src/mongo/db/storage/mmap_v1/dur_stats.h
+++ b/src/mongo/db/storage/mmap_v1/dur_stats.h
@@ -33,9 +33,11 @@
namespace mongo {
namespace dur {
-/** journaling stats. the model here is that the commit thread is the only writer, and that reads are
- uncommon (from a serverStatus command and such). Thus, there should not be multicore chatter overhead.
-*/
+/**
+ * journaling stats. the model here is that the commit thread is the only writer, and that reads
+ * are uncommon (from a serverStatus command and such). Thus, there should not be multicore chatter
+ * overhead.
+ */
struct Stats {
struct S {
std::string _CSVHeader() const;
diff --git a/src/mongo/db/storage/mmap_v1/durable_mapped_file.cpp b/src/mongo/db/storage/mmap_v1/durable_mapped_file.cpp
index fad28753372..967f1c92a43 100644
--- a/src/mongo/db/storage/mmap_v1/durable_mapped_file.cpp
+++ b/src/mongo/db/storage/mmap_v1/durable_mapped_file.cpp
@@ -28,9 +28,11 @@
* it in the license file.
*/
-/* this module adds some of our layers atop memory mapped files - specifically our handling of private views & such
- if you don't care about journaling/durability (temp sort files & such) use MemoryMappedFile class, not this.
-*/
+/**
+ * this module adds some of our layers atop memory mapped files - specifically our handling of
+ * private views & such. if you don't care about journaling/durability (temp sort files & such),
+ * use the MemoryMappedFile class, not this.
+ */
#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kStorage
@@ -275,9 +277,8 @@ bool DurableMappedFile::finishOpening() {
"(look in log for "
"more information)");
}
- privateViews.add_inlock(
- _view_private,
- this); // note that testIntent builds use this, even though it points to view_write then...
+ // note that testIntent builds use this, even though it points to view_write then...
+ privateViews.add_inlock(_view_private, this);
} else {
_view_private = _view_write;
}
diff --git a/src/mongo/db/storage/mmap_v1/durable_mapped_file.h b/src/mongo/db/storage/mmap_v1/durable_mapped_file.h
index 02906f112fe..2697613890b 100644
--- a/src/mongo/db/storage/mmap_v1/durable_mapped_file.h
+++ b/src/mongo/db/storage/mmap_v1/durable_mapped_file.h
@@ -37,10 +37,11 @@
namespace mongo {
-/** DurableMappedFile adds some layers atop memory mapped files - specifically our handling of private views & such.
- if you don't care about journaling/durability (temp sort files & such) use MemoryMappedFile class,
- not this.
-*/
+/**
+ * DurableMappedFile adds some layers atop memory mapped files - specifically our handling of
+ * private views & such. if you don't care about journaling/durability (temp sort files & such),
+ * use the MemoryMappedFile class, not this.
+ */
class DurableMappedFile : private MemoryMappedFile {
protected:
virtual void* viewForFlushing() {
@@ -276,6 +277,7 @@ inline void PointerToDurableMappedFile::makeWritable(void* privateView, unsigned
inline void PointerToDurableMappedFile::makeWritable(void* _p, unsigned len) {}
#endif
-// allows a pointer into any private view of a DurableMappedFile to be resolved to the DurableMappedFile object
+// allows a pointer into any private view of a DurableMappedFile to be resolved to the
+// DurableMappedFile object
extern PointerToDurableMappedFile privateViews;
}
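
One plausible shape for that resolution (the real container is PointerToDurableMappedFile; the sketch below just shows the ordered-map idiom): keep views sorted by base address, take the greatest base not above the pointer, and range-check against the view's length.

    #include <cstddef>
    #include <map>

    struct MappedFileStub { std::size_t length; };

    class PointerToFileSketch {
        std::map<const char*, MappedFileStub*> _views;  // base address -> file
    public:
        void add(const char* base, MappedFileStub* f) { _views[base] = f; }

        MappedFileStub* find(const void* p, std::size_t& ofs) const {
            auto it = _views.upper_bound(static_cast<const char*>(p));
            if (it == _views.begin())
                return nullptr;
            --it;  // greatest base address <= p
            ofs = static_cast<const char*>(p) - it->first;
            return ofs < it->second->length ? it->second : nullptr;
        }
    };
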
diff --git a/src/mongo/db/storage/mmap_v1/durop.cpp b/src/mongo/db/storage/mmap_v1/durop.cpp
index 8efd7720c3e..0ea1949ad12 100644
--- a/src/mongo/db/storage/mmap_v1/durop.cpp
+++ b/src/mongo/db/storage/mmap_v1/durop.cpp
@@ -135,9 +135,9 @@ bool FileCreatedOp::needFilesClosed() {
}
void FileCreatedOp::replay() {
- // i believe the code assumes new files are filled with zeros. thus we have to recreate the file,
- // or rewrite at least, even if it were the right length. perhaps one day we should change that
- // although easier to avoid defects if we assume it is zeros perhaps.
+ // i believe the code assumes new files are filled with zeros. thus we have to recreate the
+ // file, or rewrite at least, even if it were the right length. perhaps one day we should
+    // change that, although it is easier to avoid defects if we assume it is zeros.
string full = _p.asFullPath();
if (boost::filesystem::exists(full)) {
try {
diff --git a/src/mongo/db/storage/mmap_v1/durop.h b/src/mongo/db/storage/mmap_v1/durop.h
index a798f210616..50ddc33318a 100644
--- a/src/mongo/db/storage/mmap_v1/durop.h
+++ b/src/mongo/db/storage/mmap_v1/durop.h
@@ -43,9 +43,9 @@ namespace dur {
/** DurOp - Operations we journal that aren't just basic writes.
*
- * Basic writes are logged as JEntry's, and indicated in ram temporarily as struct dur::WriteIntent.
- * We don't make WriteIntent inherit from DurOp to keep it as lean as possible as there will be millions of
- * them (we don't want a vtable for example there).
+ * Basic writes are logged as JEntry's, and indicated in ram temporarily as struct
+ * dur::WriteIntent. We don't make WriteIntent inherit from DurOp to keep it as lean as possible as
+ * there will be millions of them (we don't want a vtable for example there).
*
* For each op we want to journal, we define a subclass.
*/
diff --git a/src/mongo/db/storage/mmap_v1/extent.h b/src/mongo/db/storage/mmap_v1/extent.h
index 9d6d3935346..16af89fb42b 100644
--- a/src/mongo/db/storage/mmap_v1/extent.h
+++ b/src/mongo/db/storage/mmap_v1/extent.h
@@ -42,7 +42,8 @@ namespace mongo {
/* extents are datafile regions where all the records within the region
belong to the same namespace.
-(11:12:35 AM) dm10gen: when the extent is allocated, all its empty space is stuck into one big DeletedRecord
+(11:12:35 AM) dm10gen: when the extent is allocated, all its empty space is stuck into one big
+ DeletedRecord
(11:12:55 AM) dm10gen: and that is placed on the free list
*/
#pragma pack(1)
diff --git a/src/mongo/db/storage/mmap_v1/extent_manager.h b/src/mongo/db/storage/mmap_v1/extent_manager.h
index 6151f8e11a2..052634d639b 100644
--- a/src/mongo/db/storage/mmap_v1/extent_manager.h
+++ b/src/mongo/db/storage/mmap_v1/extent_manager.h
@@ -106,9 +106,9 @@ public:
/**
* @param loc - has to be for a specific MmapV1RecordHeader
* Note(erh): this sadly cannot be removed.
- * A MmapV1RecordHeader DiskLoc has an offset from a file, while a RecordStore really wants an offset
- * from an extent. This intrinsically links an original record store to the original extent
- * manager.
+ * A MmapV1RecordHeader DiskLoc has an offset from a file, while a RecordStore really wants an
+ * offset from an extent. This intrinsically links an original record store to the original
+ * extent manager.
*/
virtual MmapV1RecordHeader* recordForV1(const DiskLoc& loc) const = 0;
diff --git a/src/mongo/db/storage/mmap_v1/file_allocator.cpp b/src/mongo/db/storage/mmap_v1/file_allocator.cpp
index 0500ad43a83..f033e1c6a5a 100644
--- a/src/mongo/db/storage/mmap_v1/file_allocator.cpp
+++ b/src/mongo/db/storage/mmap_v1/file_allocator.cpp
@@ -317,7 +317,8 @@ void FileAllocator::ensureLength(int fd, long size) {
void FileAllocator::checkFailure() {
if (_failed) {
- // we want to log the problem (diskfull.js expects it) but we do not want to dump a stack tracke
+ // we want to log the problem (diskfull.js expects it) but we do not want to dump a stack
+ // trace
msgassertedNoTrace(12520, "new file allocation failure");
}
}
diff --git a/src/mongo/db/storage/mmap_v1/journal_latency_test_cmd.cpp b/src/mongo/db/storage/mmap_v1/journal_latency_test_cmd.cpp
index 0f21961d459..d886eaf8b45 100644
--- a/src/mongo/db/storage/mmap_v1/journal_latency_test_cmd.cpp
+++ b/src/mongo/db/storage/mmap_v1/journal_latency_test_cmd.cpp
@@ -121,7 +121,8 @@ public:
sleepmillis(4);
}
long long y = t2.micros() - 4 * N * 1000;
- // not really trusting the timer granularity on all platforms so whichever is higher of x and y
+ // not really trusting the timer granularity on all platforms so whichever is higher
+ // of x and y
bb[pass].append("8KBWithPauses", max(x, y) / (N * 1000.0));
}
{
diff --git a/src/mongo/db/storage/mmap_v1/logfile.h b/src/mongo/db/storage/mmap_v1/logfile.h
index 4a3bb5535e2..abfb875ee4b 100644
--- a/src/mongo/db/storage/mmap_v1/logfile.h
+++ b/src/mongo/db/storage/mmap_v1/logfile.h
@@ -51,7 +51,8 @@ public:
*/
void synchronousAppend(const void* buf, size_t len);
- /** write at specified offset. must be aligned. noreturn until physically written. thread safe */
+    /** write at specified offset. must be aligned. does not return until physically written.
+     * thread safe. */
    void writeAt(unsigned long long offset, const void* _buf, size_t _len);
void readAt(unsigned long long offset, void* _buf, size_t _len);
diff --git a/src/mongo/db/storage/mmap_v1/mmap.h b/src/mongo/db/storage/mmap_v1/mmap.h
index ae9a0796a4b..6413dc26127 100644
--- a/src/mongo/db/storage/mmap_v1/mmap.h
+++ b/src/mongo/db/storage/mmap_v1/mmap.h
@@ -123,8 +123,10 @@ public:
template <class F>
static void forEach(F fun);
- /** note: you need to be in mmmutex when using this. forEach (above) handles that for you automatically.
-*/
+ /**
+ * note: you need to be in mmmutex when using this. forEach (above) handles that for you
+ * automatically.
+ */
static std::set<MongoFile*>& getAllFiles();
// callbacks if you need them
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp b/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp
index 69d80422e66..d7c44aabfab 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp
@@ -444,8 +444,8 @@ DiskLoc MmapV1ExtentManager::_allocFromFreeList(OperationContext* txn,
break;
}
if (t.seconds() >= 2) {
- // have spent lots of time in write lock, and we are in [low,high], so close enough
- // could come into play if extent freelist is very long
+                // have spent lots of time in write lock, and we are in [low,high], so this is
+                // close enough; could come into play if the extent freelist is very long
break;
}
} else {
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.h b/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.h
index 1f7a0963aa1..a2f2931e1b4 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.h
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.h
@@ -113,9 +113,9 @@ public:
/**
* @param loc - has to be for a specific MmapV1RecordHeader
* Note(erh): this sadly cannot be removed.
- * A MmapV1RecordHeader DiskLoc has an offset from a file, while a RecordStore really wants an offset
- * from an extent. This intrinsically links an original record store to the original extent
- * manager.
+ * A MmapV1RecordHeader DiskLoc has an offset from a file, while a RecordStore really wants an
+ * offset from an extent. This intrinsically links an original record store to the original
+ * extent manager.
*/
MmapV1RecordHeader* recordForV1(const DiskLoc& loc) const;
diff --git a/src/mongo/db/storage/mmap_v1/mmap_windows.cpp b/src/mongo/db/storage/mmap_v1/mmap_windows.cpp
index 88abedd9c77..aba533e5844 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_windows.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap_windows.cpp
@@ -228,7 +228,8 @@ void* MemoryMappedFile::map(const char* filenameIn, unsigned long long& length,
verify(fd == 0 && len == 0); // can't open more than once
setFilename(filenameIn);
FileAllocator::get()->allocateAsap(filenameIn, length);
- /* big hack here: Babble uses db names with colons. doesn't seem to work on windows. temporary perhaps. */
+ /* big hack here: Babble uses db names with colons. doesn't seem to work on windows. temporary
+ * perhaps. */
char filename[256];
strncpy(filename, filenameIn, 255);
filename[255] = 0;
diff --git a/src/mongo/db/storage/mmap_v1/record.h b/src/mongo/db/storage/mmap_v1/record.h
index 0f3f9ebcdd4..a37d49101b1 100644
--- a/src/mongo/db/storage/mmap_v1/record.h
+++ b/src/mongo/db/storage/mmap_v1/record.h
@@ -42,13 +42,16 @@ class DeletedRecord;
/* MmapV1RecordHeader is a record in a datafile. DeletedRecord is similar but for deleted space.
*11:03:20 AM) dm10gen: regarding extentOfs...
-(11:03:42 AM) dm10gen: an extent is a continugous disk area, which contains many Records and DeleteRecords
+(11:03:42 AM) dm10gen: an extent is a contiguous disk area, which contains many Records and
+ DeleteRecords
(11:03:56 AM) dm10gen: a DiskLoc has two pieces, the fileno and ofs. (64 bit total)
-(11:04:16 AM) dm10gen: to keep the headesr small, instead of storing a 64 bit ptr to the full extent address, we keep just the offset
+(11:04:16 AM) dm10gen: to keep the headers small, instead of storing a 64 bit ptr to the full extent
+ address, we keep just the offset
(11:04:29 AM) dm10gen: we can do this as we know the record's address, and it has the same fileNo
(11:04:33 AM) dm10gen: see class DiskLoc for more info
(11:04:43 AM) dm10gen: so that is how MmapV1RecordHeader::myExtent() works
-(11:04:53 AM) dm10gen: on an alloc(), when we build a new MmapV1RecordHeader, we must populate its extentOfs then
+(11:04:53 AM) dm10gen: on an alloc(), when we build a new MmapV1RecordHeader, we must populate its
+ extentOfs then
*/
#pragma pack(1)
class MmapV1RecordHeader {
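
The extentOfs trick in the chat log reduces to a one-liner: the record's own DiskLoc supplies the file number, so the header only needs the 32-bit offset of its extent within that same file. A sketch with stand-in types:

    struct DiskLocSketch { int a; int ofs; };  // file number + offset

    struct RecordHeaderSketch {
        int extentOfs;  // offset of the owning extent, same file as the record

        DiskLocSketch myExtent(const DiskLocSketch& myLoc) const {
            // same fileNo as the record, stored offset for the extent
            return DiskLocSketch{myLoc.a, extentOfs};
        }
    };
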
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp
index cc8cf582ffe..0e7d667f84f 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp
@@ -788,8 +788,8 @@ Status RecordStoreV1Base::validate(OperationContext* txn,
if (loc.questionable()) {
if (isCapped() && !loc.isValid() && i == 1) {
- /* the constructor for NamespaceDetails intentionally sets deletedList[1] to invalid
- see comments in namespace.h
+ /* the constructor for NamespaceDetails intentionally sets
+                 * deletedList[1] to invalid; see comments in namespace.h
*/
break;
}
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_simple.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_simple.cpp
index 5948553b9af..aa161cae41e 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_simple.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_simple.cpp
@@ -300,8 +300,8 @@ void SimpleRecordStoreV1::_compactExtent(OperationContext* txn,
fassert(17437, sourceExtent->validates(extentLoc));
{
- // The next/prev MmapV1RecordHeader pointers within the Extent might not be in order so we first
- // page in the whole Extent sequentially.
+ // The next/prev MmapV1RecordHeader pointers within the Extent might not be in order so we
+ // first page in the whole Extent sequentially.
// TODO benchmark on slow storage to verify this is measurably faster.
log() << "compact paging in len=" << sourceExtent->length / 1000000.0 << "MB" << endl;
Timer t;
diff --git a/src/mongo/dbtests/jsobjtests.cpp b/src/mongo/dbtests/jsobjtests.cpp
index 0564d183918..6edc21ca24b 100644
--- a/src/mongo/dbtests/jsobjtests.cpp
+++ b/src/mongo/dbtests/jsobjtests.cpp
@@ -189,9 +189,8 @@ void keyTest(const BSONObj& o, bool mustBeCompact = false) {
cout << r3 << endl;
}
ASSERT(ok);
- if (k.isCompactFormat() &&
- kLast
- ->isCompactFormat()) { // only check if not bson as bson woEqual is broken! (or was may2011)
+ if (k.isCompactFormat() && kLast->isCompactFormat()) {
+ // only check if not bson as bson woEqual is broken! (or was may2011)
if (k.woEqual(*kLast) != (r2 == 0)) { // check woEqual matches
cout << r2 << endl;
cout << k.toString() << endl;
@@ -909,7 +908,8 @@ public:
ASSERT_EQUALS("123.4567891234568", x["d"].toString(false, true));
ASSERT_EQUALS("123456789.1234568", x["e"].toString(false, true));
- // ASSERT_EQUALS( "1.234567891234568e+21" , x["f"].toString( false , true ) ); // windows and *nix are different - TODO, work around for test or not bother?
+ // windows and *nix are different - TODO, work around for test or not bother?
+ // ASSERT_EQUALS( "1.234567891234568e+21" , x["f"].toString( false , true ) );
ASSERT_EQUALS("-123.456", x["g"].toString(false, true));
diff --git a/src/mongo/dbtests/jstests.cpp b/src/mongo/dbtests/jstests.cpp
index 58f799b83d0..68c6e330258 100644
--- a/src/mongo/dbtests/jstests.cpp
+++ b/src/mongo/dbtests/jstests.cpp
@@ -637,7 +637,8 @@ public:
ASSERT_EQUALS(5, out["z"].number());
ASSERT_EQUALS(NumberDouble, out["a"].embeddedObjectUserCheck()["0"].type());
// Commenting so that v8 tests will work
- // ASSERT_EQUALS( NumberDouble , out["a"].embeddedObjectUserCheck()["1"].type() ); // TODO: this is technically bad, but here to make sure that i understand the behavior
+ // TODO: this is technically bad, but here to make sure that i understand the behavior
+ // ASSERT_EQUALS( NumberDouble , out["a"].embeddedObjectUserCheck()["1"].type() );
// Eliot says I don't have to worry about this case
diff --git a/src/mongo/dbtests/namespacetests.cpp b/src/mongo/dbtests/namespacetests.cpp
index 344f7522db9..3dd81c3eb27 100644
--- a/src/mongo/dbtests/namespacetests.cpp
+++ b/src/mongo/dbtests/namespacetests.cpp
@@ -142,302 +142,305 @@ public:
namespace NamespaceDetailsTests {
#if 0 // SERVER-13640
- class Base {
- const char *ns_;
+ class Base {
+ const char *ns_;
+ Lock::GlobalWrite lk;
+ OldClientContext _context;
+ public:
+ Base( const char *ns = "unittests.NamespaceDetailsTests" ) : ns_( ns ) , _context( ns ) {}
+ virtual ~Base() {
+ OperationContextImpl txn;
+ if ( !nsd() )
+ return;
+ _context.db()->dropCollection( &txn, ns() );
+ }
+ protected:
+ void create() {
Lock::GlobalWrite lk;
- OldClientContext _context;
- public:
- Base( const char *ns = "unittests.NamespaceDetailsTests" ) : ns_( ns ) , _context( ns ) {}
- virtual ~Base() {
- OperationContextImpl txn;
- if ( !nsd() )
- return;
- _context.db()->dropCollection( &txn, ns() );
- }
- protected:
- void create() {
- Lock::GlobalWrite lk;
- OperationContextImpl txn;
- ASSERT( userCreateNS( &txn, db(), ns(), fromjson( spec() ), false ).isOK() );
- }
- virtual string spec() const = 0;
- int nRecords() const {
- int count = 0;
- const Extent* ext;
- for ( RecordId extLoc = nsd()->firstExtent();
- !extLoc.isNull();
- extLoc = ext->xnext) {
- ext = extentManager()->getExtent(extLoc);
- int fileNo = ext->firstRecord.a();
- if ( fileNo == -1 )
- continue;
- for ( int recOfs = ext->firstRecord.getOfs(); recOfs != RecordId::NullOfs;
- recOfs = recordStore()->recordFor(RecordId(fileNo, recOfs))->nextOfs() ) {
- ++count;
- }
- }
- ASSERT_EQUALS( count, nsd()->numRecords() );
- return count;
- }
- int nExtents() const {
- int count = 0;
- for ( RecordId extLoc = nsd()->firstExtent();
- !extLoc.isNull();
- extLoc = extentManager()->getExtent(extLoc)->xnext ) {
+ OperationContextImpl txn;
+ ASSERT( userCreateNS( &txn, db(), ns(), fromjson( spec() ), false ).isOK() );
+ }
+ virtual string spec() const = 0;
+ int nRecords() const {
+ int count = 0;
+ const Extent* ext;
+ for ( RecordId extLoc = nsd()->firstExtent();
+ !extLoc.isNull();
+ extLoc = ext->xnext) {
+ ext = extentManager()->getExtent(extLoc);
+ int fileNo = ext->firstRecord.a();
+ if ( fileNo == -1 )
+ continue;
+ for ( int recOfs = ext->firstRecord.getOfs(); recOfs != RecordId::NullOfs;
+ recOfs = recordStore()->recordFor(RecordId(fileNo, recOfs))->nextOfs() ) {
++count;
}
- return count;
- }
- const char *ns() const {
- return ns_;
- }
- const NamespaceDetails *nsd() const {
- Collection* c = collection();
- if ( !c )
- return NULL;
- return c->detailsDeprecated();
- }
- const RecordStore* recordStore() const {
- Collection* c = collection();
- if ( !c )
- return NULL;
- return c->getRecordStore();
- }
- Database* db() const {
- return _context.db();
}
- const ExtentManager* extentManager() const {
- return db()->getExtentManager();
- }
- Collection* collection() const {
- return db()->getCollection( ns() );
- }
-
- static BSONObj bigObj() {
- BSONObjBuilder b;
- b.appendOID("_id", 0, true);
- string as( 187, 'a' );
- b.append( "a", as );
- return b.obj();
+ ASSERT_EQUALS( count, nsd()->numRecords() );
+ return count;
+ }
+ int nExtents() const {
+ int count = 0;
+ for ( RecordId extLoc = nsd()->firstExtent();
+ !extLoc.isNull();
+ extLoc = extentManager()->getExtent(extLoc)->xnext ) {
+ ++count;
}
+ return count;
+ }
+ const char *ns() const {
+ return ns_;
+ }
+ const NamespaceDetails *nsd() const {
+ Collection* c = collection();
+ if ( !c )
+ return NULL;
+ return c->detailsDeprecated();
+ }
+ const RecordStore* recordStore() const {
+ Collection* c = collection();
+ if ( !c )
+ return NULL;
+ return c->getRecordStore();
+ }
+ Database* db() const {
+ return _context.db();
+ }
+ const ExtentManager* extentManager() const {
+ return db()->getExtentManager();
+ }
+ Collection* collection() const {
+ return db()->getCollection( ns() );
+ }
- };
-
- class Create : public Base {
- public:
- void run() {
- create();
- ASSERT( nsd() );
- ASSERT_EQUALS( 0, nRecords() );
- ASSERT( nsd()->firstExtent() == nsd()->capExtent() );
- RecordId initial = RecordId();
- initial.setInvalid();
- ASSERT( initial == nsd()->capFirstNewRecord() );
- }
- virtual string spec() const { return "{\"capped\":true,\"size\":512,\"$nExtents\":1}"; }
- };
+ static BSONObj bigObj() {
+ BSONObjBuilder b;
+ b.appendOID("_id", 0, true);
+ string as( 187, 'a' );
+ b.append( "a", as );
+ return b.obj();
+ }
- class SingleAlloc : public Base {
- public:
- void run() {
- OperationContextImpl txn;
- create();
+ };
+
+ class Create : public Base {
+ public:
+ void run() {
+ create();
+ ASSERT( nsd() );
+ ASSERT_EQUALS( 0, nRecords() );
+ ASSERT( nsd()->firstExtent() == nsd()->capExtent() );
+ RecordId initial = RecordId();
+ initial.setInvalid();
+ ASSERT( initial == nsd()->capFirstNewRecord() );
+ }
+ virtual string spec() const { return "{\"capped\":true,\"size\":512,\"$nExtents\":1}"; }
+ };
+
+ class SingleAlloc : public Base {
+ public:
+ void run() {
+ OperationContextImpl txn;
+ create();
+ BSONObj b = bigObj();
+ ASSERT( collection()->insertDocument( &txn, b, true ).isOK() );
+ ASSERT_EQUALS( 1, nRecords() );
+ }
+ virtual string spec() const { return "{\"capped\":true,\"size\":512,\"$nExtents\":1}"; }
+ };
+
+ class Realloc : public Base {
+ public:
+ void run() {
+ OperationContextImpl txn;
+ create();
+
+ const int N = 20;
+ const int Q = 16; // these constants depend on the size of the bson object, the extent
+ // size allocated by the system too
+ RecordId l[ N ];
+ for ( int i = 0; i < N; ++i ) {
BSONObj b = bigObj();
- ASSERT( collection()->insertDocument( &txn, b, true ).isOK() );
- ASSERT_EQUALS( 1, nRecords() );
+ StatusWith<RecordId> status = collection()->insertDocument( &txn, b, true );
+ ASSERT( status.isOK() );
+ l[ i ] = status.getValue();
+ ASSERT( !l[ i ].isNull() );
+ ASSERT( nRecords() <= Q );
+ //ASSERT_EQUALS( 1 + i % 2, nRecords() );
+ if ( i >= 16 )
+ ASSERT( l[ i ] == l[ i - Q] );
}
- virtual string spec() const { return "{\"capped\":true,\"size\":512,\"$nExtents\":1}"; }
- };
-
- class Realloc : public Base {
- public:
- void run() {
- OperationContextImpl txn;
- create();
-
- const int N = 20;
- const int Q = 16; // these constants depend on the size of the bson object, the extent size allocated by the system too
- RecordId l[ N ];
- for ( int i = 0; i < N; ++i ) {
- BSONObj b = bigObj();
- StatusWith<RecordId> status = collection()->insertDocument( &txn, b, true );
- ASSERT( status.isOK() );
- l[ i ] = status.getValue();
- ASSERT( !l[ i ].isNull() );
- ASSERT( nRecords() <= Q );
- //ASSERT_EQUALS( 1 + i % 2, nRecords() );
- if ( i >= 16 )
- ASSERT( l[ i ] == l[ i - Q] );
- }
+ }
+ virtual string spec() const { return "{\"capped\":true,\"size\":512,\"$nExtents\":1}"; }
+ };
+
+ class TwoExtent : public Base {
+ public:
+ void run() {
+ OperationContextImpl txn;
+ create();
+ ASSERT_EQUALS( 2, nExtents() );
+
+ RecordId l[ 8 ];
+ for ( int i = 0; i < 8; ++i ) {
+ StatusWith<RecordId> status = collection()->insertDocument( &txn, bigObj(), true );
+ ASSERT( status.isOK() );
+ l[ i ] = status.getValue();
+ ASSERT( !l[ i ].isNull() );
+ //ASSERT_EQUALS( i < 2 ? i + 1 : 3 + i % 2, nRecords() );
+ //if ( i > 3 )
+ // ASSERT( l[ i ] == l[ i - 4 ] );
}
- virtual string spec() const { return "{\"capped\":true,\"size\":512,\"$nExtents\":1}"; }
- };
+ ASSERT( nRecords() == 8 );
+
+ // Too big
+ BSONObjBuilder bob;
+ bob.appendOID( "_id", NULL, true );
+ bob.append( "a", string( MinExtentSize + 500, 'a' ) ); // min extent size is now 4096
+ BSONObj bigger = bob.done();
+ StatusWith<RecordId> status = collection()->insertDocument( &txn, bigger, false );
+ ASSERT( !status.isOK() );
+ ASSERT_EQUALS( 0, nRecords() );
+ }
+ private:
+ virtual string spec() const {
+ return "{\"capped\":true,\"size\":512,\"$nExtents\":2}";
+ }
+ };
- class TwoExtent : public Base {
- public:
- void run() {
- OperationContextImpl txn;
- create();
- ASSERT_EQUALS( 2, nExtents() );
-
- RecordId l[ 8 ];
- for ( int i = 0; i < 8; ++i ) {
- StatusWith<RecordId> status = collection()->insertDocument( &txn, bigObj(), true );
- ASSERT( status.isOK() );
- l[ i ] = status.getValue();
- ASSERT( !l[ i ].isNull() );
- //ASSERT_EQUALS( i < 2 ? i + 1 : 3 + i % 2, nRecords() );
- //if ( i > 3 )
- // ASSERT( l[ i ] == l[ i - 4 ] );
- }
- ASSERT( nRecords() == 8 );
-
- // Too big
- BSONObjBuilder bob;
- bob.appendOID( "_id", NULL, true );
- bob.append( "a", string( MinExtentSize + 500, 'a' ) ); // min extent size is now 4096
- BSONObj bigger = bob.done();
- StatusWith<RecordId> status = collection()->insertDocument( &txn, bigger, false );
- ASSERT( !status.isOK() );
- ASSERT_EQUALS( 0, nRecords() );
- }
- private:
- virtual string spec() const {
- return "{\"capped\":true,\"size\":512,\"$nExtents\":2}";
- }
- };
+ BSONObj docForRecordSize( int size ) {
+ BSONObjBuilder b;
+ b.append( "_id", 5 );
+ b.append( "x", string( size - Record::HeaderSize - 22, 'x' ) );
+ BSONObj x = b.obj();
+ ASSERT_EQUALS( Record::HeaderSize + x.objsize(), size );
+ return x;
+ }
- BSONObj docForRecordSize( int size ) {
- BSONObjBuilder b;
- b.append( "_id", 5 );
- b.append( "x", string( size - Record::HeaderSize - 22, 'x' ) );
- BSONObj x = b.obj();
- ASSERT_EQUALS( Record::HeaderSize + x.objsize(), size );
- return x;
+ /**
+ * alloc() does not quantize records in capped collections.
+ * NB: this actually tests that the code in Database::createCollection doesn't set
+ * PowerOf2Sizes for capped collections.
+ */
+ class AllocCappedNotQuantized : public Base {
+ public:
+ void run() {
+ OperationContextImpl txn;
+ create();
+ ASSERT( nsd()->isCapped() );
+ ASSERT( !nsd()->isUserFlagSet( NamespaceDetails::Flag_UsePowerOf2Sizes ) );
+
+ StatusWith<RecordId> result =
+ collection()->insertDocument( &txn, docForRecordSize( 300 ), false );
+ ASSERT( result.isOK() );
+ Record* record = collection()->getRecordStore()->recordFor( result.getValue() );
+ // Check that no quantization is performed.
+ ASSERT_EQUALS( 300, record->lengthWithHeaders() );
}
-
- /**
- * alloc() does not quantize records in capped collections.
- * NB: this actually tests that the code in Database::createCollection doesn't set
- * PowerOf2Sizes for capped collections.
- */
- class AllocCappedNotQuantized : public Base {
- public:
- void run() {
- OperationContextImpl txn;
- create();
- ASSERT( nsd()->isCapped() );
- ASSERT( !nsd()->isUserFlagSet( NamespaceDetails::Flag_UsePowerOf2Sizes ) );
-
- StatusWith<RecordId> result =
- collection()->insertDocument( &txn, docForRecordSize( 300 ), false );
- ASSERT( result.isOK() );
- Record* record = collection()->getRecordStore()->recordFor( result.getValue() );
- // Check that no quantization is performed.
- ASSERT_EQUALS( 300, record->lengthWithHeaders() );
- }
- virtual string spec() const { return "{capped:true,size:2048}"; }
- };
+ virtual string spec() const { return "{capped:true,size:2048}"; }
+ };
- /* test NamespaceDetails::cappedTruncateAfter(const char *ns, RecordId loc)
- */
- class TruncateCapped : public Base {
- virtual string spec() const {
- return "{\"capped\":true,\"size\":512,\"$nExtents\":2}";
+ /* test NamespaceDetails::cappedTruncateAfter(const char *ns, RecordId loc)
+ */
+ class TruncateCapped : public Base {
+ virtual string spec() const {
+ return "{\"capped\":true,\"size\":512,\"$nExtents\":2}";
+ }
+ void pass(int p) {
+ OperationContextImpl txn;
+ create();
+ ASSERT_EQUALS( 2, nExtents() );
+
+ BSONObj b = bigObj();
+
+ int N = MinExtentSize / b.objsize() * nExtents() + 5;
+ int T = N - 4;
+
+ RecordId truncAt;
+ //RecordId l[ 8 ];
+ for ( int i = 0; i < N; ++i ) {
+ BSONObj bb = bigObj();
+ StatusWith<RecordId> status = collection()->insertDocument( &txn, bb, true );
+ ASSERT( status.isOK() );
+ RecordId a = status.getValue();
+ if( T == i )
+ truncAt = a;
+ ASSERT( !a.isNull() );
+ /*ASSERT_EQUALS( i < 2 ? i + 1 : 3 + i % 2, nRecords() );
+ if ( i > 3 )
+ ASSERT( l[ i ] == l[ i - 4 ] );*/
+ }
+ ASSERT( nRecords() < N );
+
+ RecordId last, first;
+ {
+ unique_ptr<Runner> runner(InternalPlanner::collectionScan(&txn,
+ ns(),
+ collection(),
+ InternalPlanner::BACKWARD));
+ runner->getNext(NULL, &last);
+ ASSERT( !last.isNull() );
+ }
+ {
+ unique_ptr<Runner> runner(InternalPlanner::collectionScan(&txn,
+ ns(),
+ collection(),
+ InternalPlanner::FORWARD));
+ runner->getNext(NULL, &first);
+ ASSERT( !first.isNull() );
+ ASSERT( first != last ) ;
}
- void pass(int p) {
- OperationContextImpl txn;
- create();
- ASSERT_EQUALS( 2, nExtents() );
-
- BSONObj b = bigObj();
-
- int N = MinExtentSize / b.objsize() * nExtents() + 5;
- int T = N - 4;
-
- RecordId truncAt;
- //RecordId l[ 8 ];
- for ( int i = 0; i < N; ++i ) {
- BSONObj bb = bigObj();
- StatusWith<RecordId> status = collection()->insertDocument( &txn, bb, true );
- ASSERT( status.isOK() );
- RecordId a = status.getValue();
- if( T == i )
- truncAt = a;
- ASSERT( !a.isNull() );
- /*ASSERT_EQUALS( i < 2 ? i + 1 : 3 + i % 2, nRecords() );
- if ( i > 3 )
- ASSERT( l[ i ] == l[ i - 4 ] );*/
- }
- ASSERT( nRecords() < N );
-
- RecordId last, first;
- {
- unique_ptr<Runner> runner(InternalPlanner::collectionScan(&txn,
- ns(),
- collection(),
- InternalPlanner::BACKWARD));
- runner->getNext(NULL, &last);
- ASSERT( !last.isNull() );
- }
- {
- unique_ptr<Runner> runner(InternalPlanner::collectionScan(&txn,
- ns(),
- collection(),
- InternalPlanner::FORWARD));
- runner->getNext(NULL, &first);
- ASSERT( !first.isNull() );
- ASSERT( first != last ) ;
- }
-
- collection()->temp_cappedTruncateAfter(&txn, truncAt, false);
- ASSERT_EQUALS( collection()->numRecords() , 28u );
-
- {
- RecordId loc;
- unique_ptr<Runner> runner(InternalPlanner::collectionScan(&txn,
- ns(),
- collection(),
- InternalPlanner::FORWARD));
- runner->getNext(NULL, &loc);
- ASSERT( first == loc);
- }
- {
- unique_ptr<Runner> runner(InternalPlanner::collectionScan(&txn,
- ns(),
- collection(),
- InternalPlanner::BACKWARD));
- RecordId loc;
- runner->getNext(NULL, &loc);
- ASSERT( last != loc );
- ASSERT( !last.isNull() );
- }
- // Too big
- BSONObjBuilder bob;
- bob.appendOID("_id", 0, true);
- bob.append( "a", string( MinExtentSize + 300, 'a' ) );
- BSONObj bigger = bob.done();
- StatusWith<RecordId> status = collection()->insertDocument( &txn, bigger, true );
- ASSERT( !status.isOK() );
- ASSERT_EQUALS( 0, nRecords() );
+ collection()->temp_cappedTruncateAfter(&txn, truncAt, false);
+ ASSERT_EQUALS( collection()->numRecords() , 28u );
+
+ {
+ RecordId loc;
+ unique_ptr<Runner> runner(InternalPlanner::collectionScan(&txn,
+ ns(),
+ collection(),
+ InternalPlanner::FORWARD));
+ runner->getNext(NULL, &loc);
+ ASSERT( first == loc);
}
- public:
- void run() {
-// log() << "******** NOT RUNNING TruncateCapped test yet ************" << endl;
- pass(0);
+ {
+ unique_ptr<Runner> runner(InternalPlanner::collectionScan(&txn,
+ ns(),
+ collection(),
+ InternalPlanner::BACKWARD));
+ RecordId loc;
+ runner->getNext(NULL, &loc);
+ ASSERT( last != loc );
+ ASSERT( !last.isNull() );
}
- };
+
+ // Too big
+ BSONObjBuilder bob;
+ bob.appendOID("_id", 0, true);
+ bob.append( "a", string( MinExtentSize + 300, 'a' ) );
+ BSONObj bigger = bob.done();
+ StatusWith<RecordId> status = collection()->insertDocument( &txn, bigger, true );
+ ASSERT( !status.isOK() );
+ ASSERT_EQUALS( 0, nRecords() );
+ }
+ public:
+ void run() {
+// log() << "******** NOT RUNNING TruncateCapped test yet ************" << endl;
+ pass(0);
+ }
+ };
#endif // SERVER-13640
#if 0 // XXXXXX - once RecordStore is clean, we can put this back
class Migrate : public Base {
public:
void run() {
create();
- nsd()->deletedListEntry( 2 ) = nsd()->cappedListOfAllDeletedRecords().drec()->nextDeleted().drec()->nextDeleted();
- nsd()->cappedListOfAllDeletedRecords().drec()->nextDeleted().drec()->nextDeleted().writing() = RecordId();
+ nsd()->deletedListEntry( 2 ) = nsd()->cappedListOfAllDeletedRecords().drec()->
+ nextDeleted().drec()->nextDeleted();
+ nsd()->cappedListOfAllDeletedRecords().drec()->nextDeleted().drec()->
+ nextDeleted().writing() = RecordId();
nsd()->cappedLastDelRecLastExtent().Null();
NamespaceDetails *d = nsd();
@@ -451,7 +454,8 @@ namespace NamespaceDetailsTests {
ASSERT( nsd()->capExtent().getOfs() != 0 );
ASSERT( !nsd()->capFirstNewRecord().isValid() );
int nDeleted = 0;
- for ( RecordId i = nsd()->cappedListOfAllDeletedRecords(); !i.isNull(); i = i.drec()->nextDeleted(), ++nDeleted );
+ for ( RecordId i = nsd()->cappedListOfAllDeletedRecords();
+ !i.isNull(); i = i.drec()->nextDeleted(), ++nDeleted );
ASSERT_EQUALS( 10, nDeleted );
ASSERT( nsd()->cappedLastDelRecLastExtent().isNull() );
}
diff --git a/src/mongo/dbtests/perftests.cpp b/src/mongo/dbtests/perftests.cpp
index 599287edee0..99877968016 100644
--- a/src/mongo/dbtests/perftests.cpp
+++ b/src/mongo/dbtests/perftests.cpp
@@ -1,7 +1,8 @@
 /** @file perftests.cpp : unit tests relating to performance
- The idea herein is tests that run fast and can be part of the normal CI suite. So no tests herein that take
- a long time to run. Obviously we need those too, but they will be separate.
+ The idea herein is tests that run fast and can be part of the normal CI suite. So no
+ tests herein that take a long time to run. Obviously we need those too, but they will be
+ separate.
These tests use DBDirectClient; they are a bit white-boxish.
*/
@@ -217,7 +218,8 @@ protected:
virtual string name() = 0;
- // how long to run test. 0 is a sentinel which means just run the timed() method once and time it.
+ // how long to run test. 0 is a sentinel which means just run the timed() method once and time
+ // it.
virtual int howLongMillis() {
return profiling ? 30000 : 5000;
}
@@ -1232,7 +1234,8 @@ public:
void prep() {
{
- // the checksum code assumes 'standard' rollover on addition overflows. let's check that:
+ // the checksum code assumes 'standard' rollover on addition overflows. let's check
+ // that:
unsigned long long x = 0xffffffffffffffffULL;
ASSERT(x + 2 == 1);
}
@@ -1263,8 +1266,9 @@ public:
((char*&)p)[1]--;
c.gen(p, sz);
ASSERT(c != last);
- ((char*&)p)
- [1]++; // check same data, different order, doesn't give same checksum (different longwords case)
+ // check same data, different order, doesn't give same checksum (different longwords
+ // case)
+ ((char*&)p)[1]++;
((char*&)p)[8]--;
c.gen(p, sz);
ASSERT(c != last);
diff --git a/src/mongo/dbtests/query_stage_and.cpp b/src/mongo/dbtests/query_stage_and.cpp
index 86139ff4fde..f34a84eec06 100644
--- a/src/mongo/dbtests/query_stage_and.cpp
+++ b/src/mongo/dbtests/query_stage_and.cpp
@@ -27,8 +27,8 @@
*/
/**
- * This file tests db/exec/and_*.cpp and RecordId invalidation. RecordId invalidation forces a fetch
- * so we cannot test it outside of a dbtest.
+ * This file tests db/exec/and_*.cpp and RecordId invalidation. RecordId invalidation forces a
+ * fetch so we cannot test it outside of a dbtest.
*/
diff --git a/src/mongo/dbtests/sharding.cpp b/src/mongo/dbtests/sharding.cpp
index e9be534f995..4b15938625a 100644
--- a/src/mongo/dbtests/sharding.cpp
+++ b/src/mongo/dbtests/sharding.cpp
@@ -168,9 +168,9 @@ public:
};
//
-// Tests creating a new chunk manager with random split points. Creating chunks on multiple shards is not
-// tested here since there are unresolved race conditions there and probably should be avoided if at all
-// possible.
+// Tests creating a new chunk manager with random split points. Creating chunks on multiple shards
+// is not tested here since there are unresolved race conditions there, and it probably should be
+// avoided if at all possible.
//
class ChunkManagerCreateFullTest : public ChunkManagerTest {
public:
@@ -212,7 +212,8 @@ public:
set<int> minorVersions;
OID epoch;
- // Check that all chunks were created with version 1|x with consistent epoch and unique minor versions
+ // Check that all chunks were created with version 1|x with consistent epoch and unique
+ // minor versions
while (cursor->more()) {
BSONObj chunk = cursor->next();
@@ -234,8 +235,8 @@ public:
};
//
-// Tests that chunks are loaded correctly from the db with no a-priori info and also that they can be reloaded
-// on top of an old chunk manager with changes.
+// Tests that chunks are loaded correctly from the db with no a-priori info and also that they can
+// be reloaded on top of an old chunk manager with changes.
//
class ChunkManagerLoadBasicTest : public ChunkManagerCreateFullTest {
public:
@@ -325,7 +326,8 @@ public:
}
};
- // Allow validating with and without ranges (b/c our splits won't actually be updated by the diffs)
+ // Allow validating with and without ranges (b/c our splits won't actually be updated by the
+ // diffs)
void validate(const std::vector<ChunkType>& chunks,
ChunkVersion maxVersion,
const VersionMap& maxShardVersions) {
@@ -354,7 +356,8 @@ public:
for (const ChunkType& chunk : chunks) {
if (ranges != NULL) {
- // log() << "Validating chunk " << chunkDoc << " size : " << ranges->size() << " vs " << chunkCount << endl;
+ // log() << "Validating chunk " << chunkDoc << " size : " << ranges->size() << " vs
+ // " << chunkCount << endl;
RangeMap::iterator chunkRange =
ranges->find(_inverse ? chunk.getMax() : chunk.getMin());
@@ -563,7 +566,8 @@ public:
chunk[ChunkType::shard()].String())
break;
- // log() << "... appending chunk from diff shard: " << prevShardChunk << endl;
+ // log() << "... appending chunk from diff shard: " << prevShardChunk <<
+ // endl;
newChunksB.append(prevShardChunk);
prevShardChunk = BSONObj();
@@ -599,7 +603,8 @@ public:
BSONObj newShard = newShardB.obj();
BSONObj prevShard = prevShardB.obj();
- // log() << " ... migrated to " << newShard << " and updated " << prevShard << endl;
+ // log() << " ... migrated to " << newShard << " and updated " << prevShard
+ // << endl;
newChunksB.append(newShard);
newChunksB.append(prevShard);
diff --git a/src/mongo/dbtests/threadedtests.cpp b/src/mongo/dbtests/threadedtests.cpp
index f87f8245aec..fd695d71460 100644
--- a/src/mongo/dbtests/threadedtests.cpp
+++ b/src/mongo/dbtests/threadedtests.cpp
@@ -462,8 +462,8 @@ public:
}
};
-// we don't use upgrade so that part is not important currently but the other aspects of this test are
-// interesting; it would be nice to do analogous tests for SimpleRWLock and QLock
+// we don't use upgrade so that part is not important currently but the other aspects of this test
+// are interesting; it would be nice to do analogous tests for SimpleRWLock and QLock
class UpgradableTest : public ThreadedTest<7> {
RWLock m;
@@ -483,7 +483,7 @@ private:
*/
// /-- verify upgrade can be done instantly while in a read lock already
// | /-- verify upgrade acquisition isn't greedy
- // | | /-- verify writes aren't greedy while in upgradable (or are they?)
+    // | |  /-- verify writes aren't greedy while in upgradable mode (or are they?)
// v v v
const char* what = " RURuRwR";
@@ -535,8 +535,10 @@ private:
LOG(Z) << x << ' ' << ch << " got " << endl;
if (what[x] == 'R') {
if (t.millis() > 15) {
- // commented out for less chatter, we aren't using upgradeable anyway right now:
- // log() << x << " info: when in upgradable, write locks are still greedy on this platform" << endl;
+ // commented out for less chatter, we aren't using upgradeable anyway right
+ // now:
+ // log() << x << " info: when in upgradable, write locks are still greedy "
+ // "on this platform" << endl;
}
}
sleepmillis(200);
@@ -561,8 +563,8 @@ void sleepalittle() {
int once;
/* This test is to see how long it takes to get a lock after there has been contention -- the OS
- will need to reschedule us. if a spinlock, it will be fast of course, but these aren't spin locks.
- Experimenting with different # of threads would be a good idea.
+ will need to reschedule us. if a spinlock, it will be fast of course, but these aren't spin
+ locks. Experimenting with different # of threads would be a good idea.
*/
template <class whichmutex, class scoped>
class Slack : public ThreadedTest<17> {
@@ -720,8 +722,8 @@ private:
};
-// Tests waiting on the TicketHolder by running many more threads than can fit into the "hotel", but only
-// max _nRooms threads should ever get in at once
+// Tests waiting on the TicketHolder by running many more threads than can fit into the "hotel", but
+// only max _nRooms threads should ever get in at once
class TicketHolderWaits : public ThreadedTest<10> {
static const int checkIns = 1000;
static const int rooms = 3;
@@ -779,8 +781,8 @@ private:
}
virtual void validate() {
- // This should always be true, assuming that it takes < 1 sec for the hardware to process a check-out/check-in
- // Time for test is then ~ #threads / _nRooms * 2 seconds
+ // This should always be true, assuming that it takes < 1 sec for the hardware to process a
+ // check-out/check-in. Time for the test is then ~ #threads / _nRooms * 2 seconds
verify(_hotel._maxRooms == _hotel._nRooms);
}
};
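The "hotel" test above exercises a ticket-counting primitive: at most _nRooms threads hold a ticket at once, and everyone else blocks. Below is a generic counting-semaphore sketch of that idea, not MongoDB's actual TicketHolder class.

#include <condition_variable>
#include <mutex>

class TicketHolderSketch {
public:
    explicit TicketHolderSketch(int n) : _available(n) {}

    void waitForTicket() {
        std::unique_lock<std::mutex> lk(_m);
        _cv.wait(lk, [this] { return _available > 0; });
        --_available;  // "checked in" to a room
    }

    void release() {
        {
            std::lock_guard<std::mutex> lk(_m);
            ++_available;  // "checked out" of the room
        }
        _cv.notify_one();
    }

private:
    std::mutex _m;
    std::condition_variable _cv;
    int _available;
};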
diff --git a/src/mongo/s/balance.cpp b/src/mongo/s/balance.cpp
index dc429e85d80..2ca778607f2 100644
--- a/src/mongo/s/balance.cpp
+++ b/src/mongo/s/balance.cpp
@@ -461,8 +461,8 @@ bool Balancer::_init() {
// contact the config server and refresh shard information
// checks that each shard is indeed a different process (no hostname mixup)
- // these checks are redundant in that they're redone at every new round but we want to do them initially here
- // so to catch any problem soon
+ // these checks are redundant in that they're redone at every new round but we want to do
+ // them initially here so as to catch any problems soon
Shard::reloadShardInfo();
_checkOIDs();
diff --git a/src/mongo/s/balance.h b/src/mongo/s/balance.h
index c62e05c18f4..d3dc9b39045 100644
--- a/src/mongo/s/balance.h
+++ b/src/mongo/s/balance.h
@@ -85,11 +85,12 @@ private:
bool _init();
/**
- * Gathers all the necessary information about shards and chunks, and decides whether there are candidate chunks to
- * be moved.
+ * Gathers all the necessary information about shards and chunks, and decides whether there are
+ * candidate chunks to be moved.
*
* @param conn is the connection with the config server(s)
- * @param candidateChunks (IN/OUT) filled with candidate chunks, one per collection, that could possibly be moved
+ * @param candidateChunks (IN/OUT) filled with candidate chunks, one per collection, that could
+ * possibly be moved
*/
void _doBalanceRound(std::vector<std::shared_ptr<MigrateInfo>>* candidateChunks);
@@ -111,7 +112,8 @@ private:
void _ping(bool waiting = false);
/**
- * @return true if all the servers listed in configdb as being shards are reachable and are distinct processes
+ * @return true if all the servers listed in configdb as being shards are reachable and are
+ * distinct processes
*/
bool _checkOIDs();
};
diff --git a/src/mongo/s/catalog/legacy/distlock.cpp b/src/mongo/s/catalog/legacy/distlock.cpp
index b0d30b28967..fa9c7a90fde 100644
--- a/src/mongo/s/catalog/legacy/distlock.cpp
+++ b/src/mongo/s/catalog/legacy/distlock.cpp
@@ -106,8 +106,8 @@ DistLockHandle LockException::getMustUnlockID() const {
}
/**
- * Create a new distributed lock, potentially with a custom sleep and takeover time. If a custom sleep time is
- * specified (time between pings)
+ * Create a new distributed lock, potentially with a custom sleep time (the time between pings)
+ * and a custom takeover time.
*/
DistributedLock::DistributedLock(const ConnectionString& conn,
const string& name,
@@ -155,8 +155,9 @@ const string& DistributedLock::getProcessId() const {
}
/**
- * Returns the remote time as reported by the cluster or server. The maximum difference between the reported time
- * and the actual time on the remote server (at the completion of the function) is the maxNetSkew
+ * Returns the remote time as reported by the cluster or server. The maximum difference between the
+ * reported time and the actual time on the remote server (at the completion of the function) is the
+ * maxNetSkew
*/
Date_t DistributedLock::remoteTime(const ConnectionString& cluster, unsigned long long maxNetSkew) {
ConnectionString server(*cluster.getServers().begin());
@@ -197,8 +198,8 @@ Date_t DistributedLock::remoteTime(const ConnectionString& cluster, unsigned lon
13647);
}
- // Make sure that our delay is not more than 2x our maximum network skew, since this is the max our remote
- // time value can be off by if we assume a response in the middle of the delay.
+ // Make sure that our delay is not more than 2x our maximum network skew, since this is the max
+ // our remote time value can be off by if we assume a response in the middle of the delay.
if (delay > Milliseconds(maxNetSkew * 2)) {
throw TimeNotFoundException(
str::stream() << "server " << server.toString() << " in cluster " << cluster.toString()
@@ -227,7 +228,8 @@ bool DistributedLock::checkSkew(const ConnectionString& cluster,
if (i == 0)
avgSkews.push_back(0);
- // Could check if this is self, but shouldn't matter since local network connection should be fast.
+ // Could check if this is self, but shouldn't matter since local network connection
+ // should be fast.
ConnectionString server(*si);
vector<long long> skew;
@@ -375,7 +377,8 @@ bool DistributedLock::lock_try(const string& why, BSONObj* other, double timeout
LockpingsType::ConfigNS, o[LocksType::process()].wrap(LockpingsType::process()));
if (lastPing.isEmpty()) {
LOG(logLvl) << "empty ping found for process in lock '" << lockName << "'" << endl;
- // TODO: Using 0 as a "no time found" value Will fail if dates roll over, but then, so will a lot.
+ // TODO: Using 0 as a "no time found" value will fail if dates roll over, but then,
+ // so will a lot.
lastPing = BSON(LockpingsType::process(o[LocksType::process()].String())
<< LockpingsType::ping(Date_t()));
}
@@ -420,7 +423,8 @@ bool DistributedLock::lock_try(const string& why, BSONObj* other, double timeout
} catch (LockException& e) {
// Remote server cannot be found / is not responsive
warning() << "Could not get remote time from " << _conn << causedBy(e);
- // If our config server is having issues, forget all the pings until we can see it again
+ // If our config server is having issues, forget all the pings until we can see it
+ // again
resetLastPing();
}
@@ -438,8 +442,8 @@ bool DistributedLock::lock_try(const string& why, BSONObj* other, double timeout
if (elapsed > takeover) {
// Lock may be forced; reset our timer whether that succeeds or fails
- // Ensures that another timeout must happen if something borks up here, and resets our pristine
- // ping state if acquired.
+ // Ensures that another timeout must happen if something borks up here, and resets
+ // our pristine ping state if acquired.
resetLastPing();
try {
@@ -464,7 +468,8 @@ bool DistributedLock::lock_try(const string& why, BSONObj* other, double timeout
BSONObj err = conn->getLastErrorDetailed();
string errMsg = DBClientWithCommands::getLastErrorString(err);
- // TODO: Clean up all the extra code to exit this method, probably with a refactor
+ // TODO: Clean up all the extra code to exit this method, probably with a
+ // refactor
if (!errMsg.empty() || !err["n"].type() || err["n"].numberInt() < 1) {
logErrMsgOrWarn(
"Could not force lock", lockName, errMsg, "(another force won");
@@ -475,8 +480,8 @@ bool DistributedLock::lock_try(const string& why, BSONObj* other, double timeout
}
} catch (UpdateNotTheSame&) {
- // Ok to continue since we know we forced at least one lock document, and all lock docs
- // are required for a lock to be held.
+ // Ok to continue since we know we forced at least one lock document, and all
+ // lock docs are required for a lock to be held.
warning() << "lock forcing " << lockName << " inconsistent" << endl;
} catch (const LockException&) {
// Let the exception go up and don't repackage the exception.
@@ -495,8 +500,9 @@ bool DistributedLock::lock_try(const string& why, BSONObj* other, double timeout
// are acquired.
resetLastPing();
- // Test that the lock is held by trying to update the finalized state of the lock to the same state
- // if it does not update or does not update on all servers, we can't re-enter.
+ // Test that the lock is held by trying to update the finalized state of the lock to
+ // the same state. If it does not update, or does not update on all servers, we can't
+ // re-enter.
try {
// Test the lock with the correct "ts" (OID) value
conn->update(LocksType::ConfigNS,
@@ -508,7 +514,8 @@ bool DistributedLock::lock_try(const string& why, BSONObj* other, double timeout
BSONObj err = conn->getLastErrorDetailed();
string errMsg = DBClientWithCommands::getLastErrorString(err);
- // TODO: Clean up all the extra code to exit this method, probably with a refactor
+ // TODO: Clean up all the extra code to exit this method, probably with a
+ // refactor
if (!errMsg.empty() || !err["n"].type() || err["n"].numberInt() < 1) {
logErrMsgOrWarn(
"Could not re-enter lock", lockName, errMsg, "(not sure lock is held");
@@ -541,7 +548,8 @@ bool DistributedLock::lock_try(const string& why, BSONObj* other, double timeout
LOG(logLvl - 1) << "lock '" << lockName << "' successfully forced" << endl;
- // We don't need the ts value in the query, since we will only ever replace locks with state=0.
+ // We don't need the ts value in the query, since we will only ever replace locks with
+ // state=0.
}
// Case 3: We have an expired lock
else if (o[LocksType::lockID()].type()) {
@@ -549,8 +557,9 @@ bool DistributedLock::lock_try(const string& why, BSONObj* other, double timeout
}
}
- // Always reset our ping if we're trying to get a lock, since getting a lock implies the lock state is open
- // and no locks need to be forced. If anything goes wrong, we don't want to remember an old lock.
+ // Always reset our ping if we're trying to get a lock, since getting a lock implies the lock
+ // state is open and no locks need to be forced. If anything goes wrong, we don't want to
+ // remember an old lock.
resetLastPing();
bool gotLock = false;
@@ -607,10 +616,11 @@ bool DistributedLock::lock_try(const string& why, BSONObj* other, double timeout
indUpdate = indDB->findOne(LocksType::ConfigNS, BSON(LocksType::name(_name)));
// If we override this lock in any way, grab and protect it.
- // We assume/ensure that if a process does not have all lock documents, it is no longer
- // holding the lock.
- // Note - finalized locks may compete too, but we know they've won already if competing
- // in this round. Cleanup of crashes during finalizing may take a few tries.
+ // We assume/ensure that if a process does not have all lock documents, it is no
+ // longer holding the lock.
+ // Note - finalized locks may compete too, but we know they've won already if
+ // competing in this round. Cleanup of crashes during finalizing may take a few
+ // tries.
if (indUpdate[LocksType::lockID()] < lockDetails[LocksType::lockID()] ||
indUpdate[LocksType::state()].numberInt() == 0) {
BSONObj grabQuery =
@@ -622,8 +632,9 @@ bool DistributedLock::lock_try(const string& why, BSONObj* other, double timeout
BSON(LocksType::lockID(lockDetails[LocksType::lockID()].OID())
<< LocksType::state(1));
- // Either our update will succeed, and we'll grab the lock, or it will fail b/c some other
- // process grabbed the lock (which will change the ts), but the lock will be set until forcing
+ // Either our update will succeed, and we'll grab the lock, or it will fail b/c
+ // some other process grabbed the lock (which will change the ts), but the lock
+ // will be set until forcing
indDB->update(LocksType::ConfigNS, grabQuery, BSON("$set" << grabChanges));
indUpdate = indDB->findOne(LocksType::ConfigNS, BSON(LocksType::name(_name)));
@@ -687,11 +698,12 @@ bool DistributedLock::lock_try(const string& why, BSONObj* other, double timeout
// Complete lock propagation
if (gotLock) {
- // This is now safe, since we know that no new locks will be placed on top of the ones we've checked for at
- // least 15 minutes. Sets the state = 2, so that future clients can determine that the lock is truly set.
- // The invariant for rollbacks is that we will never force locks with state = 2 and active pings, since that
- // indicates the lock is active, but this means the process creating/destroying them must explicitly poll
- // when something goes wrong.
+ // This is now safe, since we know that no new locks will be placed on top of the ones we've
+ // checked for at least 15 minutes. Sets the state = 2, so that future clients can
+ // determine that the lock is truly set. The invariant for rollbacks is that we will never
+ // force locks with state = 2 and active pings, since that indicates the lock is active, but
+ // this means the process creating/destroying them must explicitly poll when something goes
+ // wrong.
try {
BSONObjBuilder finalLockDetails;
BSONObjIterator bi(lockDetails);
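The remoteTime() comment above bounds clock skew by round-trip delay: assuming the remote clock was sampled in the middle of the delay, the reported value can be off by up to delay / 2, so any delay above 2 * maxNetSkew cannot satisfy the requested bound. A hedged sketch, with simplified types standing in for the real Date_t / Milliseconds:

#include <chrono>
#include <stdexcept>

using Millis = std::chrono::milliseconds;

Millis boundedRemoteTimeOffset(Millis sendTime, Millis recvTime, Millis maxNetSkew) {
    const Millis delay = recvTime - sendTime;
    if (delay > 2 * maxNetSkew)
        throw std::runtime_error("round trip too slow to bound clock skew");
    // Best estimate: the remote clock was read halfway through the delay.
    return delay / 2;
}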
diff --git a/src/mongo/s/catalog/legacy/distlock.h b/src/mongo/s/catalog/legacy/distlock.h
index b267be42a04..638608025d2 100644
--- a/src/mongo/s/catalog/legacy/distlock.h
+++ b/src/mongo/s/catalog/legacy/distlock.h
@@ -136,12 +136,13 @@ public:
static LastPings lastPings;
/**
- * The constructor does not connect to the configdb yet and constructing does not mean the lock was acquired.
- * Construction does trigger a lock "pinging" mechanism, though.
+ * The constructor does not connect to the configdb yet and constructing does not mean the lock
+ * was acquired. Construction does trigger a lock "pinging" mechanism, though.
*
* @param conn address of config(s) server(s)
* @param name identifier for the lock
- * @param lockTimeout how long can the log go "unpinged" before a new attempt to lock steals it (in minutes).
+ * @param lockTimeout how long the lock can go "unpinged" before a new attempt to lock steals it
+ * (in minutes).
* @param lockPing how long to wait between lock pings
* @param legacy use legacy logic
*
@@ -153,12 +154,13 @@ public:
~DistributedLock(){};
/**
- * Attempts to acquire 'this' lock, checking if it could or should be stolen from the previous holder. Please
- * consider using the dist_lock_try construct to acquire this lock in an exception safe way.
+ * Attempts to acquire 'this' lock, checking if it could or should be stolen from the previous
+ * holder. Please consider using the dist_lock_try construct to acquire this lock in an
+ * exception safe way.
*
* @param why human readable description of why the lock is being taken (used to log)
- * @param other configdb's lock document that is currently holding the lock, if lock is taken, or our own lock
- * details if not
+ * @param other configdb's lock document that is currently holding the lock, if lock is taken,
+ * or our own lock details if not
* @return true if it managed to grab the lock
*/
bool lock_try(const std::string& why, BSONObj* other = 0, double timeout = 0.0);
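A hypothetical usage sketch for the interface documented above, based only on the constructor parameters and the lock_try() signature shown in this header; the connection string and lock name are invented for illustration:

// DistributedLock balancerLock(configConnString, "balancer");  // hypothetical names
// BSONObj otherHolder;
// if (balancerLock.lock_try("doing balance round", &otherHolder)) {
//     // safe to proceed; we hold the distributed lock
// } else {
//     // otherHolder describes whoever currently holds the lock
// }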
diff --git a/src/mongo/s/catalog/type_changelog.h b/src/mongo/s/catalog/type_changelog.h
index 7a0360fd7a1..c7d15ad4856 100644
--- a/src/mongo/s/catalog/type_changelog.h
+++ b/src/mongo/s/catalog/type_changelog.h
@@ -259,8 +259,8 @@ private:
// Convention: (M)andatory, (O)ptional, (S)pecial rule.
std::string _changeID; // (M) id for this change "<hostname>-<current_time>-<increment>"
bool _isChangeIDSet;
- std::string
- _server; // (M) hostname of server that we are making the change on. Does not include port.
+ // (M) hostname of server that we are making the change on. Does not include port.
+ std::string _server;
bool _isServerSet;
std::string _clientAddr; // (M) hostname:port of the client that made this change
bool _isClientAddrSet;
diff --git a/src/mongo/s/chunk.cpp b/src/mongo/s/chunk.cpp
index ede622930f5..ba1a61d4298 100644
--- a/src/mongo/s/chunk.cpp
+++ b/src/mongo/s/chunk.cpp
@@ -559,7 +559,8 @@ bool Chunk::splitIfShould(long dataWritten) const {
}
if (_maxIsInf() || _minIsInf()) {
- // we don't want to reset _dataWritten since we kind of want to check the other side right away
+ // we don't want to reset _dataWritten since we kind of want to check the other side
+ // right away
} else {
// we're splitting, so should wait a bit
_dataWritten = 0;
diff --git a/src/mongo/s/chunk.h b/src/mongo/s/chunk.h
index d4fd7857d30..6a855fdcfe6 100644
--- a/src/mongo/s/chunk.h
+++ b/src/mongo/s/chunk.h
@@ -129,7 +129,8 @@ public:
bool splitIfShould(long dataWritten) const;
/**
- * Splits this chunk at a non-specificed split key to be chosen by the mongod holding this chunk.
+ * Splits this chunk at a non-specified split key to be chosen by the
+ * mongod holding this chunk.
*
* @param mode
* @param res the object containing details about the split execution
@@ -294,7 +295,10 @@ private:
*/
void determineSplitPoints(bool atMedian, std::vector<BSONObj>* splitPoints) const;
- /** initializes _dataWritten with a random value so that a mongos restart wouldn't cause delay in splitting */
+ /**
+ * initializes _dataWritten with a random value so that a mongos restart
+ * wouldn't cause delay in splitting
+ */
static int mkDataWritten();
};
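The mkDataWritten() doc comment above describes seeding _dataWritten randomly so that chunks do not all hit their split check in lockstep after a mongos restart. A minimal sketch, assuming a hypothetical split-test threshold (the real server constant is not shown in this diff):

#include <cstdlib>

static const int kSplitTestThresholdBytes = 20 * 1024 * 1024;  // hypothetical value

int mkDataWrittenSketch() {
    // Seed somewhere in [0, threshold / 5): enough spread to stagger split checks
    // across chunks instead of triggering them all at once.
    return std::rand() % (kSplitTestThresholdBytes / 5);
}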
diff --git a/src/mongo/s/chunk_manager.h b/src/mongo/s/chunk_manager.h
index c2c7a48286d..63e563e7d67 100644
--- a/src/mongo/s/chunk_manager.h
+++ b/src/mongo/s/chunk_manager.h
@@ -146,7 +146,8 @@ public:
}
/**
- * this is just an increasing number of how many ChunkManagers we have so we know if something has been updated
+ * this is just an increasing count of how many ChunkManagers we have created, so we know if
+ * something has been updated
*/
unsigned long long getSequenceNumber() const {
return _sequenceNumber;
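A sketch of the sequence-number scheme documented above: every new manager takes the next value of a process-wide counter, so two managers can be compared by number to detect that routing info was refreshed. Simplified with std::atomic; the real class's synchronization may differ.

#include <atomic>

class ChunkManagerSketch {
public:
    ChunkManagerSketch() : _sequenceNumber(++nextSequence) {}
    unsigned long long getSequenceNumber() const {
        return _sequenceNumber;  // larger number == newer routing info
    }

private:
    static std::atomic<unsigned long long> nextSequence;
    const unsigned long long _sequenceNumber;
};

std::atomic<unsigned long long> ChunkManagerSketch::nextSequence{0};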
diff --git a/src/mongo/s/commands/cluster_add_shard_cmd.cpp b/src/mongo/s/commands/cluster_add_shard_cmd.cpp
index 78c2a693846..136c8001a95 100644
--- a/src/mongo/s/commands/cluster_add_shard_cmd.cpp
+++ b/src/mongo/s/commands/cluster_add_shard_cmd.cpp
@@ -87,7 +87,8 @@ public:
return false;
}
- // using localhost in server names implies every other process must use localhost addresses too
+ // using localhost in server names implies every other process must use localhost addresses
+ // too
std::vector<HostAndPort> serverAddrs = servers.getServers();
for (size_t i = 0; i < serverAddrs.size(); i++) {
if (serverAddrs[i].isLocalHost() != grid.allowLocalHost()) {
diff --git a/src/mongo/s/commands/cluster_db_stats_cmd.cpp b/src/mongo/s/commands/cluster_db_stats_cmd.cpp
index a5a50c867cf..3378de75ca5 100644
--- a/src/mongo/s/commands/cluster_db_stats_cmd.cpp
+++ b/src/mongo/s/commands/cluster_db_stats_cmd.cpp
@@ -79,7 +79,8 @@ public:
}
}
- // result.appendNumber( "collections" , ncollections ); //TODO: need to find a good way to get this
+ // TODO: need to find a good way to get this
+ // result.appendNumber( "collections" , ncollections );
output.appendNumber("objects", objects);
// avgObjSize on mongod is not scaled based on the argument to db.stats(), so we use
diff --git a/src/mongo/s/commands/commands_public.cpp b/src/mongo/s/commands/commands_public.cpp
index 395f84c8c7e..b353497d4fe 100644
--- a/src/mongo/s/commands/commands_public.cpp
+++ b/src/mongo/s/commands/commands_public.cpp
@@ -1108,7 +1108,8 @@ public:
verify(0);
}
- // We could support arbitrary shard keys by sending commands to all shards but I don't think we should
+ // We could support arbitrary shard keys by sending commands to all shards but I don't think
+ // we should
errmsg =
"GridFS fs.chunks collection must be sharded on either {files_id:1} or {files_id:1, "
"n:1}";
diff --git a/src/mongo/s/config.cpp b/src/mongo/s/config.cpp
index 7481f949a2a..919f38608d1 100644
--- a/src/mongo/s/config.cpp
+++ b/src/mongo/s/config.cpp
@@ -210,8 +210,8 @@ void DBConfig::getChunkManagerOrPrimary(const string& ns,
std::shared_ptr<ChunkManager>& manager,
std::shared_ptr<Shard>& primary) {
// The logic here is basically that at any time, our collection can become sharded or unsharded
- // via a command. If we're not sharded, we want to send data to the primary, if sharded, we want
- // to send data to the correct chunks, and we can't check both w/o the lock.
+ // via a command. If we're not sharded, we want to send data to the primary; if sharded, we
+ // want to send data to the correct chunks, and we can't check both w/o the lock.
manager.reset();
primary.reset();
@@ -228,9 +228,10 @@ void DBConfig::getChunkManagerOrPrimary(const string& ns,
} else {
CollectionInfo& cInfo = i->second;
- // TODO: we need to be careful about handling shardingEnabled, b/c in some places we seem to use and
- // some we don't. If we use this function in combination with just getChunkManager() on a slightly
- // borked config db, we'll get lots of staleconfig retries
+ // TODO: we need to be careful about handling shardingEnabled, b/c in some places we
+ // seem to use it and in some we don't. If we use this function in combination with just
+ // getChunkManager() on a slightly borked config db, we'll get lots of staleconfig
+ // retries
if (_shardingEnabled && cInfo.isSharded()) {
manager = cInfo.getCM();
} else {
@@ -294,7 +295,8 @@ std::shared_ptr<ChunkManager> DBConfig::getChunkManager(const string& ns,
invariant(!key.isEmpty());
// TODO: We need to keep this first one-chunk check in until we have a more efficient way of
- // creating/reusing a chunk manager, as doing so requires copying the full set of chunks currently
+ // creating/reusing a chunk manager, as doing so currently requires copying the full set of
+ // chunks
vector<ChunkType> newestChunk;
if (oldVersion.isSet() && !forceReload) {
uassertStatusOK(grid.catalogManager()->getChunks(
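The getChunkManagerOrPrimary() comment above implies a caller-side contract: after the call, exactly one of manager / primary is set. A sketch of how a caller branches on it, with stub types in place of the real ChunkManager / Shard:

#include <memory>

struct ChunkManagerStub {};
struct ShardStub {};

void routeWrite(const std::shared_ptr<ChunkManagerStub>& manager,
                const std::shared_ptr<ShardStub>& primary) {
    if (manager) {
        // sharded: target the chunk(s) owning the document's shard key
    } else if (primary) {
        // unsharded: send everything to the primary shard
    }
}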
diff --git a/src/mongo/s/d_migrate.cpp b/src/mongo/s/d_migrate.cpp
index a0893be2524..d195b85fa48 100644
--- a/src/mongo/s/d_migrate.cpp
+++ b/src/mongo/s/d_migrate.cpp
@@ -514,7 +514,8 @@ public:
// use the average object size to estimate how many objects a full chunk would carry
// do that while traversing the chunk's range using the sharding index, below
- // there's a fair amount of slack before we determine a chunk is too large because object sizes will vary
+ // there's a fair amount of slack before we determine a chunk is too large because object
+ // sizes will vary
unsigned long long maxRecsWhenFull;
long long avgRecSize;
const long long totalRecs = collection->numRecords(txn);
@@ -936,7 +937,8 @@ public:
}
} initialCloneCommand;
-// Tests can pause / resume moveChunk's progress at each step by enabling / disabling each fail point.
+// Tests can pause / resume moveChunk's progress at each step by enabling / disabling each fail
+// point.
MONGO_FP_DECLARE(moveChunkHangAtStep1);
MONGO_FP_DECLARE(moveChunkHangAtStep2);
MONGO_FP_DECLARE(moveChunkHangAtStep3);
@@ -1466,8 +1468,8 @@ public:
log() << "moveChunk setting version to: " << myVersion << migrateLog;
// 5.b
- // we're under the collection lock here, too, so we can undo the chunk donation because no other state change
- // could be ongoing
+ // we're under the collection lock here, too, so we can undo the chunk donation because
+ // no other state change could be ongoing
BSONObj res;
bool ok;
@@ -1511,12 +1513,13 @@ public:
// 5.c
// version at which the next highest lastmod will be set
- // if the chunk being moved is the last in the shard, nextVersion is that chunk's lastmod
- // otherwise the highest version is from the chunk being bumped on the FROM-shard
+ // if the chunk being moved is the last in the shard, nextVersion is that chunk's
+ // lastmod; otherwise the highest version is from the chunk being bumped on the
+ // FROM-shard
ChunkVersion nextVersion;
- // we want to go only once to the configDB but perhaps change two chunks, the one being migrated and another
- // local one (so to bump version for the entire shard)
+ // we want to go only once to the configDB but perhaps change two chunks, the one being
+ // migrated and another local one (so as to bump the version for the entire shard)
// we use the 'applyOps' mechanism to group the two updates and make them safer
// TODO pull config update code to a module
@@ -1657,9 +1660,11 @@ public:
// this could be a blip in the connectivity
// wait out a few seconds and check if the commit request made it
//
- // if the commit made it to the config, we'll see the chunk in the new shard and there's no action
- // if the commit did not make it, currently the only way to fix this state is to bounce the mongod so
- // that the old state (before migrating) be brought in
+ // if the commit made it to the config, we'll see the chunk in the new shard and
+ // there's no action
+ //
+ // if the commit did not make it, currently the only way to fix this state is to
+ // bounce the mongod so that the old state (before migrating) is brought back in
warning() << "moveChunk commit outcome ongoing" << migrateLog;
sleepsecs(10);
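The "full chunk" estimate referenced in the d_migrate.cpp hunk above derives an average record size from collection totals and then budgets records per chunk with slack for size variance. A hedged sketch; the 1.3 slack factor is illustrative, not the server's exact value:

#include <algorithm>

long long estimateMaxRecsWhenFull(long long totalRecs,
                                  long long totalBytes,
                                  long long maxChunkSizeBytes) {
    if (totalRecs == 0)
        return 0;  // empty collection: nothing to estimate from
    // Guard against tiny records so the division below stays meaningful.
    const long long avgRecSize = std::max(1LL, totalBytes / totalRecs);
    // Allow some slack before declaring a chunk too large, since object sizes vary.
    return static_cast<long long>(1.3 * maxChunkSizeBytes / avgRecSize);
}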
diff --git a/src/mongo/s/d_split.cpp b/src/mongo/s/d_split.cpp
index cbf81006723..5501cd3795d 100644
--- a/src/mongo/s/d_split.cpp
+++ b/src/mongo/s/d_split.cpp
@@ -286,8 +286,9 @@ public:
string& errmsg,
BSONObjBuilder& result) {
//
- // 1.a We'll parse the parameters in two steps. First, make sure the we can use the split index to get
- // a good approximation of the size of the chunk -- without needing to access the actual data.
+ // 1.a We'll parse the parameters in two steps. First, make sure that we can use the split
+ // index to get a good approximation of the size of the chunk -- without needing to
+ // access the actual data.
//
const std::string ns = parseNs(dbname, jsobj);
@@ -298,7 +299,8 @@ public:
return false;
}
- // If min and max are not provided use the "minKey" and "maxKey" for the sharding key pattern.
+ // If min and max are not provided use the "minKey" and "maxKey" for the sharding key
+ // pattern.
BSONObj min = jsobj.getObjectField("min");
BSONObj max = jsobj.getObjectField("max");
if (min.isEmpty() != max.isEmpty()) {
@@ -354,12 +356,12 @@ public:
const long long dataSize = collection->dataSize(txn);
//
- // 1.b Now that we have the size estimate, go over the remaining parameters and apply any maximum size
- // restrictions specified there.
+ // 1.b Now that we have the size estimate, go over the remaining parameters and apply
+ // any maximum size restrictions specified there.
//
- // 'force'-ing a split is equivalent to having maxChunkSize be the size of the current chunk, i.e., the
- // logic below will split that chunk in half
+ // 'force'-ing a split is equivalent to having maxChunkSize be the size of the current
+ // chunk, i.e., the logic below will split that chunk in half
long long maxChunkSize = 0;
bool forceMedianSplit = false;
{
@@ -381,8 +383,8 @@ public:
}
}
- // We need a maximum size for the chunk, unless we're not actually capable of finding any
- // split points.
+ // We need a maximum size for the chunk, unless we're not actually capable of
+ // finding any split points.
if (maxChunkSize <= 0 && recCount != 0) {
errmsg =
"need to specify the desired max chunk size (maxChunkSize or "
@@ -402,9 +404,9 @@ public:
log() << "request split points lookup for chunk " << ns << " " << min << " -->> " << max
<< endl;
- // We'll use the average object size and number of object to find approximately how many keys
- // each chunk should have. We'll split at half the maxChunkSize or maxChunkObjects, if
- // provided.
+ // We'll use the average object size and number of objects to find approximately how many
+ // keys each chunk should have. We'll split at half the maxChunkSize or maxChunkObjects,
+ // if provided.
const long long avgRecSize = dataSize / recCount;
long long keyCount = maxChunkSize / (2 * avgRecSize);
if (maxChunkObjects && (maxChunkObjects < keyCount)) {
@@ -433,9 +435,9 @@ public:
return false;
}
- // Use every 'keyCount'-th key as a split point. We add the initial key as a sentinel, to be removed
- // at the end. If a key appears more times than entries allowed on a chunk, we issue a warning and
- // split on the following key.
+ // Use every 'keyCount'-th key as a split point. We add the initial key as a sentinel,
+ // to be removed at the end. If a key appears more times than entries allowed on a
+ // chunk, we issue a warning and split on the following key.
set<BSONObj> tooFrequentKeys;
splitKeys.push_back(
prettyKey(idx->keyPattern(), currKey.getOwned()).extractFields(keyPattern));
@@ -448,7 +450,8 @@ public:
if (currCount > keyCount && !forceMedianSplit) {
currKey = prettyKey(idx->keyPattern(), currKey.getOwned())
.extractFields(keyPattern);
- // Do not use this split key if it is the same used in the previous split point.
+ // Do not use this split key if it is the same used in the previous split
+ // point.
if (currKey.woCompare(splitKeys.back()) == 0) {
tooFrequentKeys.insert(currKey.getOwned());
} else {
@@ -492,8 +495,8 @@ public:
}
//
- // 3. Format the result and issue any warnings about the data we gathered while traversing the
- // index
+ // 3. Format the result and issue any warnings about the data we gathered while
+ // traversing the index
//
// Warn for keys that are more numerous than maxChunkSize allows.
@@ -715,7 +718,8 @@ public:
log() << "splitChunk accepted at version " << shardVersion;
//
- // 3. create the batch of updates to metadata ( the new chunks ) to be applied via 'applyOps' command
+ // 3. create the batch of updates to metadata ( the new chunks ) to be applied via
+ // 'applyOps' command
//
BSONObjBuilder logDetail;
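The split-point budget described in the d_split.cpp hunks above is simple arithmetic: approximate keys per chunk from the average record size, aim to split at half of maxChunkSize, and cap by maxChunkObjects when provided. A worked sketch:

#include <algorithm>
#include <cassert>

long long splitKeyCount(long long dataSize,
                        long long recCount,
                        long long maxChunkSize,
                        long long maxChunkObjects) {
    // Assumes recCount > 0; clamp avgRecSize so the division stays defined.
    const long long avgRecSize = std::max(1LL, dataSize / recCount);
    long long keyCount = maxChunkSize / (2 * avgRecSize);  // split at half the max size
    if (maxChunkObjects && maxChunkObjects < keyCount)
        keyCount = maxChunkObjects;  // object-count cap wins when smaller
    return keyCount;
}

int main() {
    // 1 GB of data in 1M records -> ~1 KB average record size; with a 64 MB
    // max chunk size we take a split point roughly every 32K keys.
    assert(splitKeyCount(1LL << 30, 1LL << 20, 64LL << 20, 0) == (32LL << 10));
    return 0;
}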
diff --git a/src/mongo/s/d_state.h b/src/mongo/s/d_state.h
index d0e1d502ae4..917f9fe6ab9 100644
--- a/src/mongo/s/d_state.h
+++ b/src/mongo/s/d_state.h
@@ -325,8 +325,8 @@ public:
}
private:
- bool
- _forceVersionOk; // if this is true, then chunk version #s aren't check, and all ops are allowed
+ // if this is true, then chunk version #s aren't checked, and all ops are allowed
+ bool _forceVersionOk;
typedef std::map<std::string, ChunkVersion> NSVersionMap;
NSVersionMap _versions;
diff --git a/src/mongo/s/grid.cpp b/src/mongo/s/grid.cpp
index e36e1efac54..e58fec42a15 100644
--- a/src/mongo/s/grid.cpp
+++ b/src/mongo/s/grid.cpp
@@ -81,8 +81,8 @@ void Grid::setAllowLocalHost(bool allow) {
}
/*
- * Returns whether balancing is enabled, with optional namespace "ns" parameter for balancing on a particular
- * collection.
+ * Returns whether balancing is enabled, with optional namespace "ns" parameter for balancing on a
+ * particular collection.
*/
bool Grid::shouldBalance(const SettingsType& balancerSettings) const {
if (balancerSettings.isBalancerStoppedSet() && balancerSettings.getBalancerStopped()) {
diff --git a/src/mongo/s/server.cpp b/src/mongo/s/server.cpp
index 94ed60e91de..ad8eac354b4 100644
--- a/src/mongo/s/server.cpp
+++ b/src/mongo/s/server.cpp
@@ -330,7 +330,8 @@ static int _main() {
ExitCode exitCode = runMongosServer(mongosGlobalParams.upgrade);
- // To maintain backwards compatibility, we exit with EXIT_NET_ERROR if the listener loop returns.
+ // To maintain backwards compatibility, we exit with EXIT_NET_ERROR if the listener loop
+ // returns.
if (exitCode == EXIT_NET_ERROR) {
dbexit(EXIT_NET_ERROR);
}
diff --git a/src/mongo/scripting/bson_template_evaluator.h b/src/mongo/scripting/bson_template_evaluator.h
index a9fee62cde3..67cdced4e4f 100644
--- a/src/mongo/scripting/bson_template_evaluator.h
+++ b/src/mongo/scripting/bson_template_evaluator.h
@@ -174,7 +174,8 @@ private:
BSONObjBuilder& out);
/*
- * Operator method to support #RAND_INT_PLUS_THREAD : { key : { #RAND_INT_PLUS_THREAD: [10, 20] } }
+ * Operator method to support
+ * #RAND_INT_PLUS_THREAD : { key : { #RAND_INT_PLUS_THREAD: [10, 20] } }
* See #RAND_INT above for definition. This variation differs from the base in that
* it uses the upper bound of the requested range to segment the ranges by
* the thread_id of the TemplateEvaluator - thus
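A hedged sketch of the #RAND_INT_PLUS_THREAD behavior the comment above describes: draw from the base range, then offset by threadId times the upper bound so each thread produces a disjoint segment (for [10, 20]: thread 0 yields [10, 20), thread 1 yields [30, 40)). It ignores the real evaluator's RNG plumbing:

#include <cstdlib>

int randIntPlusThread(int lower, int upper, int threadId) {
    // Assumes upper > lower; base value is uniform in [lower, upper).
    const int base = lower + std::rand() % (upper - lower);
    return base + threadId * upper;  // segment the range per thread
}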
diff --git a/src/mongo/scripting/bson_template_evaluator_test.cpp b/src/mongo/scripting/bson_template_evaluator_test.cpp
index d458dc0f119..d4a4c2850f4 100644
--- a/src/mongo/scripting/bson_template_evaluator_test.cpp
+++ b/src/mongo/scripting/bson_template_evaluator_test.cpp
@@ -681,7 +681,8 @@ TEST(BSONTemplateEvaluatorTest, NESTING) {
ASSERT_LESS_THAN(randValue1, 5);
// Test success when operators are arbitrarily nested within multiple elements
- // {id: { foo: "hi", bar: { #op: [1, 5] }, baz: { baz_a: { #op, [5, 10] }, baz_b: { #op, [10, 15] }, baz_c: "bye" } }
+ // {id: { foo: "hi", bar: { #op: [1, 5] }, baz: { baz_a: { #op, [5, 10] }, baz_b: { #op, [10,
+ // 15] }, baz_c: "bye" } } }
BSONObjBuilder builder4;
BSONObj barObj4 = BSON("#RAND_INT" << BSON_ARRAY(1 << 5));
BSONObj bazObj4a = BSON("#RAND_INT" << BSON_ARRAY(5 << 10));
diff --git a/src/mongo/shell/linenoise.cpp b/src/mongo/shell/linenoise.cpp
index 5fd3371f374..641a4389e2d 100644
--- a/src/mongo/shell/linenoise.cpp
+++ b/src/mongo/shell/linenoise.cpp
@@ -890,7 +890,8 @@ void InputBuffer::refreshLine(PromptBase& pi) {
#ifndef _WIN32
/**
- * Read a UTF-8 sequence from the non-Windows keyboard and return the Unicode (UChar32) character it encodes
+ * Read a UTF-8 sequence from the non-Windows keyboard and return the Unicode (UChar32) character it
+ * encodes
*
* @return UChar32 Unicode character
*/
@@ -1258,17 +1259,22 @@ static UChar32 linenoiseReadChar(void) {
char buf[1024];
sprintf(
buf,
- "Unicode character 0x%04X, repeat count %d, virtual keycode 0x%04X, virtual scancode 0x%04X, key %s%s%s%s%s\n",
+ "Unicode character 0x%04X, repeat count %d, virtual keycode 0x%04X, "
+ "virtual scancode 0x%04X, key %s%s%s%s%s\n",
rec.Event.KeyEvent.uChar.UnicodeChar,
rec.Event.KeyEvent.wRepeatCount,
rec.Event.KeyEvent.wVirtualKeyCode,
rec.Event.KeyEvent.wVirtualScanCode,
rec.Event.KeyEvent.bKeyDown ? "down" : "up",
- (rec.Event.KeyEvent.dwControlKeyState & LEFT_CTRL_PRESSED) ? " L-Ctrl" : "",
- (rec.Event.KeyEvent.dwControlKeyState & RIGHT_CTRL_PRESSED) ? " R-Ctrl" : "",
- (rec.Event.KeyEvent.dwControlKeyState & LEFT_ALT_PRESSED) ? " L-Alt" : "",
- (rec.Event.KeyEvent.dwControlKeyState & RIGHT_ALT_PRESSED) ? " R-Alt" : ""
- );
+ (rec.Event.KeyEvent.dwControlKeyState & LEFT_CTRL_PRESSED) ?
+ " L-Ctrl" : "",
+ (rec.Event.KeyEvent.dwControlKeyState & RIGHT_CTRL_PRESSED) ?
+ " R-Ctrl" : "",
+ (rec.Event.KeyEvent.dwControlKeyState & LEFT_ALT_PRESSED) ?
+ " L-Alt" : "",
+ (rec.Event.KeyEvent.dwControlKeyState & RIGHT_ALT_PRESSED) ?
+ " R-Alt" : ""
+ );
OutputDebugStringA( buf );
//}
}
@@ -1277,15 +1283,17 @@ static UChar32 linenoiseReadChar(void) {
if (rec.EventType != KEY_EVENT) {
continue;
}
- // Windows provides for entry of characters that are not on your keyboard by sending the Unicode
- // characters as a "key up" with virtual keycode 0x12 (VK_MENU == Alt key) ... accept these characters,
- // otherwise only process characters on "key down"
+ // Windows provides for entry of characters that are not on your keyboard by sending the
+ // Unicode characters as a "key up" with virtual keycode 0x12 (VK_MENU == Alt key) ...
+ // accept these characters, otherwise only process characters on "key down"
if (!rec.Event.KeyEvent.bKeyDown && rec.Event.KeyEvent.wVirtualKeyCode != VK_MENU) {
continue;
}
modifierKeys = 0;
- // AltGr is encoded as ( LEFT_CTRL_PRESSED | RIGHT_ALT_PRESSED ), so don't treat this combination as either CTRL or META
- // we just turn off those two bits, so it is still possible to combine CTRL and/or META with an AltGr key by using right-Ctrl and/or left-Alt
+ // AltGr is encoded as ( LEFT_CTRL_PRESSED | RIGHT_ALT_PRESSED ), so don't treat this
+ // combination as either CTRL or META; we just turn off those two bits, so it is still
+ // possible to combine CTRL and/or META with an AltGr key by using right-Ctrl and/or
+ // left-Alt
if ((rec.Event.KeyEvent.dwControlKeyState & (LEFT_CTRL_PRESSED | RIGHT_ALT_PRESSED)) ==
(LEFT_CTRL_PRESSED | RIGHT_ALT_PRESSED)) {
rec.Event.KeyEvent.dwControlKeyState &= ~(LEFT_CTRL_PRESSED | RIGHT_ALT_PRESSED);
@@ -1438,16 +1446,16 @@ static const size_t completionCountCutoff = 100;
/**
* Handle command completion, using a completionCallback() routine to provide possible substitutions
- * This routine handles the mechanics of updating the user's input buffer with possible replacement of
- * text as the user selects a proposed completion string, or cancels the completion attempt.
+ * This routine handles the mechanics of updating the user's input buffer with possible replacement
+ * of text as the user selects a proposed completion string, or cancels the completion attempt.
* @param pi PromptBase struct holding information about the prompt and our screen position
*/
int InputBuffer::completeLine(PromptBase& pi) {
linenoiseCompletions lc;
char c = 0;
- // completionCallback() expects a parsable entity, so find the previous break character and extract
- // a copy to parse. we also handle the case where tab is hit while not at end-of-line.
+ // completionCallback() expects a parsable entity, so find the previous break character and
+ // extract a copy to parse. we also handle the case where tab is hit while not at end-of-line.
int startIndex = pos;
while (--startIndex >= 0) {
if (strchr(breakChars, buf32[startIndex])) {
@@ -1705,10 +1713,11 @@ void InputBuffer::clearScreen(PromptBase& pi) {
}
/**
- * Incremental history search -- take over the prompt and keyboard as the user types a search string,
- * deletes characters from it, changes direction, and either accepts the found line (for execution or
- * editing) or cancels.
- * @param pi PromptBase struct holding information about the (old, static) prompt and our screen position
+ * Incremental history search -- take over the prompt and keyboard as the user types a search
+ * string, deletes characters from it, changes direction, and either accepts the found line (for
+ * execution or editing) or cancels.
+ * @param pi PromptBase struct holding information about the (old, static) prompt and our
+ * screen position
* @param startChar the character that began the search, used to set the initial direction
*/
int InputBuffer::incrementalHistorySearch(PromptBase& pi, int startChar) {
@@ -1716,7 +1725,8 @@ int InputBuffer::incrementalHistorySearch(PromptBase& pi, int startChar) {
size_t ucharCount;
int errorCode;
- // if not already recalling, add the current line to the history list so we don't have to special case it
+ // if not already recalling, add the current line to the history list so we don't have to
+ // special case it
if (historyIndex == historyLen - 1) {
free(history[historyLen - 1]);
bufferSize = sizeof(UChar32) * len + 1;
@@ -2272,7 +2282,8 @@ int InputBuffer::getInputLine(PromptBase& pi) {
case DOWN_ARROW_KEY:
case UP_ARROW_KEY:
killRing.lastAction = KillRing::actionOther;
- // if not already recalling, add the current line to the history list so we don't have to special case it
+ // if not already recalling, add the current line to the history list so we don't
+ // have to special case it
if (historyIndex == historyLen - 1) {
free(history[historyLen - 1]);
size_t tempBufferSize = sizeof(UChar32) * len + 1;
@@ -2475,7 +2486,8 @@ int InputBuffer::getInputLine(PromptBase& pi) {
case META + '>': // meta->, end of history
case PAGE_DOWN_KEY: // Page Down, end of history
killRing.lastAction = KillRing::actionOther;
- // if not already recalling, add the current line to the history list so we don't have to special case it
+ // if not already recalling, add the current line to the history list so we don't
+ // have to special case it
if (historyIndex == historyLen - 1) {
free(history[historyLen - 1]);
size_t tempBufferSize = sizeof(UChar32) * len + 1;
@@ -2611,7 +2623,8 @@ void linenoisePreloadBuffer(const char* preloadText) {
* call it with a prompt to display and it will return a line of input from the user
*
* @param prompt text of prompt to display to the user
- * @return the returned string belongs to the caller on return and must be freed to prevent memory leaks
+ * @return the returned string belongs to the caller on return and must be freed to prevent
+ * memory leaks
*/
char* linenoise(const char* prompt) {
if (isatty(STDIN_FILENO)) { // input is from a terminal
diff --git a/src/mongo/shell/linenoise_utf8.cpp b/src/mongo/shell/linenoise_utf8.cpp
index 1a01aec6696..73d8168ac56 100644
--- a/src/mongo/shell/linenoise_utf8.cpp
+++ b/src/mongo/shell/linenoise_utf8.cpp
@@ -43,8 +43,8 @@ namespace linenoise_utf8 {
* Convert a null terminated UTF-8 string from UTF-8 and store it in a UChar32 destination buffer
* Always null terminates the destination string if at least one character position is available
* Errors in the UTF-8 encoding will be handled in two ways: the erroneous characters will be
- * converted to the Unicode error character U+FFFD and flag bits will be set in the conversionErrorCode
- * int.
+ * converted to the Unicode error character U+FFFD and flag bits will be set in the
+ * conversionErrorCode int.
*
* @param uchar32output Destination UChar32 buffer
* @param utf8input Source UTF-8 string
@@ -212,15 +212,16 @@ void copyString32(UChar32* dest32, const UChar32* source32, size_t destLengthInC
}
/**
- * Convert a specified number of UChar32 characters from a possibly null terminated UChar32 string to UTF-8
- * and store it in a UChar8 destination buffer
+ * Convert a specified number of UChar32 characters from a possibly null terminated UChar32 string
+ * to UTF-8 and store it in a UChar8 destination buffer
* Always null terminates the destination string if at least one character position is available
*
* @param dest8 Destination UChar8 buffer
* @param source32 Source UChar32 string
* @param outputBufferSizeInBytes Destination buffer size in bytes
* @param charCount Maximum number of UChar32 characters to process
- * @return Count of bytes written to output buffer, not including null terminator
+ * @return Count of bytes written to output buffer, not including null
+ * terminator
*/
size_t copyString32to8counted(UChar8* dest8,
const UChar32* source32,
@@ -263,7 +264,8 @@ size_t copyString32to8counted(UChar8* dest8,
* @param dest8 Destination UChar8 buffer
* @param source32 Source UChar32 string
* @param outputBufferSizeInBytes Destination buffer size in bytes
- * @return Count of bytes written to output buffer, not including null terminator
+ * @return Count of bytes written to output buffer, not including null
+ * terminator
*/
size_t copyString32to8(UChar8* dest8, const UChar32* source32, size_t outputBufferSizeInBytes) {
return copyString32to8counted(dest8, source32, outputBufferSizeInBytes, 0x7FFFFFFF);
@@ -303,7 +305,8 @@ int strncmp32(UChar32* first32, UChar32* second32, size_t length) {
}
/**
- * Internally convert an array of UChar32 characters of specified length to UTF-8 and write it to fileHandle
+ * Internally convert an array of UChar32 characters of specified length to UTF-8 and write it to
+ * fileHandle
*
* @param fileHandle File handle to write to
* @param string32 Source UChar32 characters, may not be null terminated
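A small usage note for the conversion helpers documented above: the returned count covers payload bytes only, so the null terminator is written (when it fits) but never counted. Sketched as a comment, with an invented buffer size:

// UChar8 utf8[256];                                          // illustrative size
// size_t written = copyString32to8(utf8, source32, sizeof(utf8));
// // `written` UTF-8 payload bytes; utf8[written] == '\0' if there was room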
diff --git a/src/mongo/shell/linenoise_utf8.h b/src/mongo/shell/linenoise_utf8.h
index b459ea344dc..4bd4c2bdc7e 100644
--- a/src/mongo/shell/linenoise_utf8.h
+++ b/src/mongo/shell/linenoise_utf8.h
@@ -41,11 +41,11 @@ typedef unsigned int UChar32; // Unicode code point
enum BadUTF8 { BadUTF8_no_error = 0x00, BadUTF8_invalid_byte = 0x01, BadUTF8_surrogate = 0x02 };
/**
- * Convert a null terminated UTF-8 std::string from UTF-8 and store it in a UChar32 destination buffer
- * Always null terminates the destination std::string if at least one character position is available
- * Errors in the UTF-8 encoding will be handled in two ways: the erroneous characters will be
- * converted to the Unicode error character U+FFFD and flag bits will be set in the conversionErrorCode
- * int.
+ * Convert a null terminated UTF-8 std::string from UTF-8 and store it in a UChar32 destination
+ * buffer. Always null terminates the destination std::string if at least one character position
+ * is available. Errors in the UTF-8 encoding will be handled in two ways: the erroneous
+ * characters will be converted to the Unicode error character U+FFFD and flag bits will be set
+ * in the conversionErrorCode int.
*
* @param uchar32output Destination UChar32 buffer
* @param utf8input Source UTF-8 string
@@ -61,7 +61,8 @@ void copyString8to32(UChar32* uchar32output,
/**
* Copy a null terminated UChar32 std::string to a UChar32 destination buffer
- * Always null terminates the destination std::string if at least one character position is available
+ * Always null terminates the destination std::string if at least one character position is
+ * available
*
* @param dest32 Destination UChar32 buffer
* @param source32 Source UChar32 string
@@ -70,15 +71,17 @@ void copyString8to32(UChar32* uchar32output,
void copyString32(UChar32* dest32, const UChar32* source32, size_t destLengthInCharacters);
/**
- * Convert a specified number of UChar32 characters from a possibly null terminated UChar32 std::string to UTF-8
- * and store it in a UChar8 destination buffer
- * Always null terminates the destination std::string if at least one character position is available
+ * Convert a specified number of UChar32 characters from a possibly null terminated UChar32
+ * std::string to UTF-8 and store it in a UChar8 destination buffer
+ * Always null terminates the destination std::string if at least one character position is
+ * available
*
* @param dest8 Destination UChar8 buffer
* @param source32 Source UChar32 string
* @param outputBufferSizeInBytes Destination buffer size in bytes
* @param charCount Maximum number of UChar32 characters to process
- * @return Count of bytes written to output buffer, not including null terminator
+ * @return Count of bytes written to output buffer, not including null
+ * terminator
*/
size_t copyString32to8counted(UChar8* dest8,
const UChar32* source32,
@@ -86,13 +89,16 @@ size_t copyString32to8counted(UChar8* dest8,
size_t charCount);
/**
- * Convert a null terminated UChar32 std::string to UTF-8 and store it in a UChar8 destination buffer
- * Always null terminates the destination std::string if at least one character position is available
+ * Convert a null terminated UChar32 std::string to UTF-8 and store it in a UChar8 destination
+ * buffer
+ * Always null terminates the destination std::string if at least one character position is
+ * available
*
* @param dest8 Destination UChar8 buffer
* @param source32 Source UChar32 string
* @param outputBufferSizeInBytes Destination buffer size in bytes
- * @return Count of bytes written to output buffer, not including null terminator
+ * @return Count of bytes written to output buffer, not including null
+ * terminator
*/
size_t copyString32to8(UChar8* dest8, const UChar32* source32, size_t outputBufferSizeInBytes);
@@ -115,7 +121,8 @@ size_t strlen32(const UChar32* str32);
int strncmp32(UChar32* first32, UChar32* second32, size_t length);
/**
- * Internally convert an array of UChar32 characters of specified length to UTF-8 and write it to fileHandle
+ * Internally convert an array of UChar32 characters of specified length to UTF-8 and write it to
+ * fileHandle
*
* @param fileHandle File handle to write to
* @param string32 Source UChar32 character array, may not be null terminated
diff --git a/src/mongo/shell/shell_utils_launcher.cpp b/src/mongo/shell/shell_utils_launcher.cpp
index ddba7fc8e28..36d8416d33a 100644
--- a/src/mongo/shell/shell_utils_launcher.cpp
+++ b/src/mongo/shell/shell_utils_launcher.cpp
@@ -744,8 +744,9 @@ int killDb(int port, ProcessId _pid, int signal, const BSONObj& opt) {
} else {
registry.deletePid(pid);
}
- // FIXME I think the intention here is to do an extra sleep only when SIGKILL is sent to the child process.
- // We may want to change the 4 below to 29, since values of i greater than that indicate we sent a SIGKILL.
+ // FIXME I think the intention here is to do an extra sleep only when SIGKILL is sent to the
+ // child process. We may want to change the 4 below to 29, since values of i greater than that
+ // indicate we sent a SIGKILL.
if (i > 4 || signal == SIGKILL) {
sleepmillis(4000); // allow operating system to reclaim resources
}
diff --git a/src/mongo/stdx/functional.h b/src/mongo/stdx/functional.h
index 5850ce36eab..d86973b2066 100644
--- a/src/mongo/stdx/functional.h
+++ b/src/mongo/stdx/functional.h
@@ -31,7 +31,8 @@
// As of VS2013, the Windows STL still doesn't have an adequate implementation
// of std::function.
//
-// See https://connect.microsoft.com/VisualStudio/feedback/details/768899/std-function-not-compiling-in-vs2012
+// See https://connect.microsoft.com/VisualStudio/feedback/details/768899/
+// std-function-not-compiling-in-vs2012
//
// The bug is fixed in VS2015.
#if !defined(_MSC_VER) || (_MSC_VER > 1800)
diff --git a/src/mongo/util/assert_util.h b/src/mongo/util/assert_util.h
index c6075e9cf5d..559055f1a27 100644
--- a/src/mongo/util/assert_util.h
+++ b/src/mongo/util/assert_util.h
@@ -90,12 +90,15 @@ struct ExceptionInfo {
int code;
};
-/** helper class that builds error strings. lighter weight than a StringBuilder, albeit less flexible.
- NOINLINE_DECL used in the constructor implementations as we are assuming this is a cold code path when used.
-
- example:
- throw UserException(123, ErrorMsg("blah", num_val));
-*/
+/**
+ * helper class that builds error strings. lighter weight than a StringBuilder, albeit less
+ * flexible.
+ * NOINLINE_DECL used in the constructor implementations as we are assuming this is a cold code
+ * path when used.
+ *
+ * example:
+ * throw UserException(123, ErrorMsg("blah", num_val));
+ */
class ErrorMsg {
public:
ErrorMsg(const char* msg, char ch);
diff --git a/src/mongo/util/background.h b/src/mongo/util/background.h
index 03fcf945f0e..1793944d02b 100644
--- a/src/mongo/util/background.h
+++ b/src/mongo/util/background.h
@@ -74,8 +74,8 @@ protected:
* after this returns, deleted if deleteSelf true.
*
* NOTE:
- * if run() throws, the exception will be caught within 'this' object and will ultimately lead to the
- * BackgroundJob's thread being finished, as if run() returned.
+ * if run() throws, the exception will be caught within 'this' object and will ultimately lead
+ * to the BackgroundJob's thread being finished, as if run() returned.
*
*/
virtual void run() = 0;
diff --git a/src/mongo/util/concurrency/value.h b/src/mongo/util/concurrency/value.h
index 4be9c3d14e5..0189c4ed521 100644
--- a/src/mongo/util/concurrency/value.h
+++ b/src/mongo/util/concurrency/value.h
@@ -74,7 +74,8 @@ public:
set(rhs.get());
}
- // == is not defined. use get() == ... instead. done this way so one thinks about if composing multiple operations
+ // == is not defined. use get() == ... instead. done this way so one thinks about it when
+ // composing multiple operations
bool operator==(const std::string& s) const;
};
}
diff --git a/src/mongo/util/debugger.cpp b/src/mongo/util/debugger.cpp
index eb51aa317c3..659c8b726f2 100644
--- a/src/mongo/util/debugger.cpp
+++ b/src/mongo/util/debugger.cpp
@@ -46,7 +46,8 @@ void breakpoint() {
#ifndef _WIN32
// code to raise a breakpoint in GDB
ONCE {
- // prevent SIGTRAP from crashing the program if default action is specified and we are not in gdb
+ // prevent SIGTRAP from crashing the program if default action is specified and we are not
+ // in gdb
struct sigaction current;
sigaction(SIGTRAP, NULL, &current);
if (current.sa_handler == SIG_DFL) {
diff --git a/src/mongo/util/descriptive_stats-inl.h b/src/mongo/util/descriptive_stats-inl.h
index 1b7c91595d1..1ae73134063 100644
--- a/src/mongo/util/descriptive_stats-inl.h
+++ b/src/mongo/util/descriptive_stats-inl.h
@@ -82,9 +82,13 @@ DistributionEstimators<NumQuantiles>::DistributionEstimators()
* The quantile estimation follows the extended_p_square implementation in boost.accumulators.
* It differs by removing the ability to request arbitrary quantiles and computing exactly
* 'NumQuantiles' equidistant quantiles (plus minimum and maximum) instead.
- * See http://www.boost.org/doc/libs/1_51_0/doc/html/boost/accumulators/impl/extended_p_square_impl.html ,
- * R. Jain and I. Chlamtac, The P^2 algorithmus for dynamic calculation of quantiles and histograms without storing observations, Communications of the ACM, Volume 28 (October), Number 10, 1985, p. 1076-1085. and
- * K. E. E. Raatikainen, Simultaneous estimation of several quantiles, Simulation, Volume 49, Number 4 (October), 1986, p. 159-164.
+ * See http://www.boost.org/doc/libs/1_51_0/doc/html/boost/accumulators/impl/
+ * extended_p_square_impl.html ,
+ * R. Jain and I. Chlamtac, The P^2 algorithm for dynamic calculation of quantiles and histograms
+ * without storing observations, Communications of the ACM, Volume 28
+ * (October), Number 10, 1985, p. 1076-1085. and
+ * K. E. E. Raatikainen, Simultaneous estimation of several quantiles, Simulation,
+ * Volume 49, Number 4 (October), 1986, p. 159-164.
*/
template <std::size_t NumQuantiles>
DistributionEstimators<NumQuantiles>& DistributionEstimators<NumQuantiles>::operator<<(
diff --git a/src/mongo/util/moveablebuffer.h b/src/mongo/util/moveablebuffer.h
index 981197028af..035c4b4c68c 100644
--- a/src/mongo/util/moveablebuffer.h
+++ b/src/mongo/util/moveablebuffer.h
@@ -32,9 +32,11 @@
namespace mongo {
-/** this is a sort of smart pointer class where we can move where something is and all the pointers will adjust.
- not threadsafe.
- */
+/**
+ * this is a sort of smart pointer class where we can move where something is and all the pointers
+ * will adjust.
+ * not threadsafe.
+ */
struct MoveableBuffer {
MoveableBuffer();
MoveableBuffer(void*);
@@ -46,7 +48,8 @@ struct MoveableBuffer {
/* implementation (inlines) below */
-// this is a temp stub implementation...not really done yet - just having everything compile & such for checkpointing into git
+// this is a temp stub implementation...not really done yet - just having everything compile & such
+// for checkpointing into git
inline MoveableBuffer::MoveableBuffer() : p(0) {}
diff --git a/src/mongo/util/net/listen.h b/src/mongo/util/net/listen.h
index 390c23bc443..d9528375b08 100644
--- a/src/mongo/util/net/listen.h
+++ b/src/mongo/util/net/listen.h
@@ -97,7 +97,8 @@ public:
if (_timeTracker)
return _timeTracker->getMyElapsedTimeMillis();
- // should this assert or throw? seems like callers may not expect to get zero back, certainly not forever.
+ // should this assert or throw? seems like callers may not expect to get zero back,
+ // certainly not forever.
return 0;
}
diff --git a/src/mongo/util/net/message.h b/src/mongo/util/net/message.h
index d83492e519c..fa4bbb19d90 100644
--- a/src/mongo/util/net/message.h
+++ b/src/mongo/util/net/message.h
@@ -337,7 +337,8 @@ inline int ConstView::dataLen() const {
class Message {
public:
- // we assume here that a vector with initial size 0 does no allocation (0 is the default, but wanted to make it explicit).
+ // we assume here that a vector with initial size 0 does no allocation (0 is the default, but
+ // wanted to make it explicit).
Message() : _buf(0), _data(0), _freeIt(false) {}
Message(void* data, bool freeIt) : _buf(0), _data(0), _freeIt(false) {
_setData(reinterpret_cast<char*>(data), freeIt);
@@ -493,7 +494,8 @@ private:
}
// if just one buffer, keep it in _buf, otherwise keep a sequence of buffers in _data
char* _buf;
- // byte buffer(s) - the first must contain at least a full MsgData unless using _buf for storage instead
+ // byte buffer(s) - the first must contain at least a full MsgData unless using _buf for storage
+ // instead
typedef std::vector<std::pair<char*, int>> MsgVec;
MsgVec _data;
bool _freeIt;
diff --git a/src/mongo/util/net/message_port.cpp b/src/mongo/util/net/message_port.cpp
index 8fae1b34a82..092c9feba4c 100644
--- a/src/mongo/util/net/message_port.cpp
+++ b/src/mongo/util/net/message_port.cpp
@@ -194,7 +194,8 @@ bool MessagingPort::recv(Message& m) {
send(s.c_str(), s.size(), "http");
return false;
} else if (len == -1) {
- // Endian check from the client, after connecting, to see what mode server is running in.
+ // Endian check from the client, after connecting, to see what mode
+ // the server is running in.
unsigned foo = 0x10203040;
send((char*)&foo, 4, "endian");
psock->setHandshakeReceived();
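On the client side, the 4-byte probe above could be interpreted as in the following minimal
sketch; the helper name is hypothetical.

#include <cstdint>
#include <cstring>

// Returns true when the server that sent `reply` shares our byte order:
// it wrote 0x10203040 in its native order, so reading it back unchanged
// means both ends agree.
bool sameByteOrder(const char reply[4]) {
    std::uint32_t v;
    std::memcpy(&v, reply, sizeof(v));
    return v == 0x10203040u;
}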
diff --git a/src/mongo/util/net/message_port.h b/src/mongo/util/net/message_port.h
index 6af98b160a9..8769c91c8b6 100644
--- a/src/mongo/util/net/message_port.h
+++ b/src/mongo/util/net/message_port.h
@@ -46,11 +46,8 @@ class AbstractMessagingPort {
public:
AbstractMessagingPort() : tag(0), _connectionId(0) {}
virtual ~AbstractMessagingPort() {}
- virtual void reply(
- Message& received,
- Message& response,
- MSGID
- responseTo) = 0; // like the reply below, but doesn't rely on received.data still being available
+ // like the reply below, but doesn't rely on received.data still being available
+ virtual void reply(Message& received, Message& response, MSGID responseTo) = 0;
virtual void reply(Message& received, Message& response) = 0;
virtual HostAndPort remote() const = 0;
diff --git a/src/mongo/util/net/miniwebserver.h b/src/mongo/util/net/miniwebserver.h
index fa026e6711d..b9d8c77b286 100644
--- a/src/mongo/util/net/miniwebserver.h
+++ b/src/mongo/util/net/miniwebserver.h
@@ -44,15 +44,14 @@ public:
MiniWebServer(const std::string& name, const std::string& ip, int _port);
virtual ~MiniWebServer() {}
- virtual void doRequest(
- const char* rq, // the full request
- std::string url,
- // set these and return them:
- std::string& responseMsg,
- int& responseCode,
- std::vector<std::string>&
- headers, // if completely empty, content-type: text/html will be added
- const SockAddr& from) = 0;
+ virtual void doRequest(const char* rq, // the full request
+ std::string url,
+ // set these and return them:
+ std::string& responseMsg,
+ int& responseCode,
+ std::vector<std::string>& headers, // if completely empty, content-type:
+ // text/html will be added
+ const SockAddr& from) = 0;
// --- static helpers ----
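A hedged sketch of how a subclass would fill in doRequest's out parameters; the class name and
body are illustrative, not part of this patch.

class PingWebServer : public MiniWebServer {
public:
    PingWebServer(const std::string& ip, int port) : MiniWebServer("ping", ip, port) {}

    virtual void doRequest(const char* rq,
                           std::string url,
                           std::string& responseMsg,
                           int& responseCode,
                           std::vector<std::string>& headers,
                           const SockAddr& from) {
        responseCode = 200;
        responseMsg = "<html><body>pong</body></html>";
        // leaving `headers` empty means content-type: text/html gets added
    }
};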
diff --git a/src/mongo/util/ntservice.cpp b/src/mongo/util/ntservice.cpp
index a92483be232..e4c0eb70348 100644
--- a/src/mongo/util/ntservice.cpp
+++ b/src/mongo/util/ntservice.cpp
@@ -292,7 +292,8 @@ void installServiceOrDie(const wstring& serviceName,
while (true) {
// Make sure service doesn't already exist.
- // TODO: Check to see if service is in "Deleting" status, suggest the user close down Services MMC snap-ins.
+ // TODO: Check to see if service is in "Deleting" status, suggest the user close down
+ // Services MMC snap-ins.
schService = ::OpenService(schSCManager, serviceName.c_str(), SERVICE_ALL_ACCESS);
if (schService != NULL) {
log() << "There is already a service named '" << toUtf8String(serviceName)
@@ -380,13 +381,14 @@ void installServiceOrDie(const wstring& serviceName,
#if 1
if (!serviceInstalled) {
#else
- // This code sets the mongod service to auto-restart, forever.
- // This might be a fine thing to do except that when mongod or Windows has a crash, the mongo.lock
- // file is still around, so any attempt at a restart will immediately fail. With auto-restart, we
- // go into a loop, crashing and restarting, crashing and restarting, until someone comes in and
- // disables the service or deletes the mongod.lock file.
+ // This code sets the mongod service to auto-restart, forever. This might be a fine thing to do
+    // except that when mongod or Windows has a crash, the mongod.lock file is still around, so any
+ // attempt at a restart will immediately fail. With auto-restart, we go into a loop, crashing
+ // and restarting, crashing and restarting, until someone comes in and disables the service or
+ // deletes the mongod.lock file.
//
- // I'm leaving the old code here for now in case we solve this and are able to turn SC_ACTION_RESTART
+ // I'm leaving the old code here for now in case we solve this and are able to turn
+ // SC_ACTION_RESTART
// back on.
//
if (serviceInstalled) {
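For reference, the auto-restart setup the comment describes would use the standard Win32
failure-actions API, roughly as sketched below. This is a hypothetical helper, not the disabled
#else body above.

#include <windows.h>

bool setAutoRestart(SC_HANDLE schService) {
    SC_ACTION action;
    action.Type = SC_ACTION_RESTART;
    action.Delay = 60 * 1000;  // restart 60s after a crash

    SERVICE_FAILURE_ACTIONS sfa = {};
    sfa.dwResetPeriod = INFINITE;  // never reset the failure count
    sfa.cActions = 1;
    sfa.lpsaActions = &action;

    // This is exactly what produces the crash/restart loop described above
    // while mongod.lock is left behind.
    return ::ChangeServiceConfig2(schService, SERVICE_CONFIG_FAILURE_ACTIONS, &sfa) != FALSE;
}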
diff --git a/src/mongo/util/options_parser/option_description.h b/src/mongo/util/options_parser/option_description.h
index 73d4106a527..de4aba5c705 100644
--- a/src/mongo/util/options_parser/option_description.h
+++ b/src/mongo/util/options_parser/option_description.h
@@ -173,8 +173,9 @@ public:
OptionDescription& validRange(long min, long max);
/**
- * Specifies that this option is incompatible with another option. The std::string provided must
- * be the dottedName, which is the name used to access the option in the result Environment.
+ * Specifies that this option is incompatible with another option. The std::string provided
+ * must be the dottedName, which is the name used to access the option in the result
+ * Environment.
*
* TODO: Find a way to check that that option actually exists in our section somewhere.
*/
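A hedged usage sketch for the doc comment above: validRange appears in this hunk, while the
registration call and the incompatible-option setter are assumptions based on this header's
chaining style, and the option names are hypothetical.

options.addOptionChaining("sharding.autoSplit", "autoSplit", moe::Bool, "enable auto-splitting")
    .incompatibleWith("sharding.manualSplitOnly");  // dottedName of the other option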
diff --git a/src/mongo/util/options_parser/options_parser.h b/src/mongo/util/options_parser/options_parser.h
index 9c3752f1660..f247fb6dfb0 100644
--- a/src/mongo/util/options_parser/options_parser.h
+++ b/src/mongo/util/options_parser/options_parser.h
@@ -108,7 +108,8 @@ private:
const std::vector<std::string>& argv,
Environment*);
- /** Handles parsing of an INI config std::string and adds the results to the given Environment */
+ /** Handles parsing of an INI config std::string and adds the results to the given Environment
+     */
Status parseINIConfigFile(const OptionSection&, const std::string& config, Environment*);
/** Gets defaults from the OptionSection and adds them to the given Environment */
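A minimal sketch of the kind of key=value scanning parseINIConfigFile implies; this is
simplified and hypothetical -- real parsing also handles sections, whitespace, and error
reporting through Status.

#include <map>
#include <sstream>
#include <string>

std::map<std::string, std::string> parseINI(const std::string& config) {
    std::map<std::string, std::string> out;
    std::istringstream in(config);
    std::string line;
    while (std::getline(in, line)) {
        if (line.empty() || line[0] == '#' || line[0] == ';')
            continue;  // skip blanks and comments
        std::string::size_type eq = line.find('=');
        if (eq != std::string::npos)
            out[line.substr(0, eq)] = line.substr(eq + 1);
    }
    return out;
}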
diff --git a/src/mongo/util/processinfo_linux.cpp b/src/mongo/util/processinfo_linux.cpp
index 9709d22bbbb..d0bcbee2c01 100644
--- a/src/mongo/util/processinfo_linux.cpp
+++ b/src/mongo/util/processinfo_linux.cpp
@@ -142,11 +142,13 @@ public:
// The process ID.
char _comm[128];
- // The filename of the executable, in parentheses. This is visible whether or not the executable is swapped out.
+ // The filename of the executable, in parentheses. This is visible whether or not the
+ // executable is swapped out.
char _state;
- // One character from the string "RSDZTW" where R is running, S is sleeping in an interruptible wait, D is waiting in uninterruptible
- // disk sleep, Z is zombie, T is traced or stopped (on a signal), and W is paging.
+ // One character from the string "RSDZTW" where R is running, S is sleeping in an interruptible
+ // wait, D is waiting in uninterruptible disk sleep, Z is zombie, T is traced or stopped (on a
+ // signal), and W is paging.
int _ppid;
// The PID of the parent.
@@ -161,19 +163,23 @@ public:
// The tty the process uses.
int _tpgid;
- // The process group ID of the process which currently owns the tty that the process is connected to.
+ // The process group ID of the process which currently owns the tty that the process is
+ // connected to.
unsigned long _flags; // %lu
- // The kernel flags word of the process. For bit meanings, see the PF_* defines in <linux/sched.h>. Details depend on the kernel version.
+ // The kernel flags word of the process. For bit meanings, see the PF_* defines in
+ // <linux/sched.h>. Details depend on the kernel version.
unsigned long _min_flt; // %lu
- // The number of minor faults the process has made which have not required loading a memory page from disk.
+ // The number of minor faults the process has made which have not required loading a memory page
+ // from disk.
unsigned long _cmin_flt; // %lu
    // The number of minor faults that the process's waited-for children have made.
unsigned long _maj_flt; // %lu
- // The number of major faults the process has made which have required loading a memory page from disk.
+ // The number of major faults the process has made which have required loading a memory page
+ // from disk.
unsigned long _cmaj_flt; // %lu
    // The number of major faults that the process's waited-for children have made.
@@ -196,7 +202,8 @@ public:
// number of threads
unsigned long _alarm;
- // The time in jiffies before the next SIGALRM is sent to the process due to an interval timer. (unused since 2.6.17)
+ // The time in jiffies before the next SIGALRM is sent to the process due to an interval timer.
+ // (unused since 2.6.17)
unsigned long _start_time; // %lu
// The time in jiffies the process started after system boot.
@@ -205,8 +212,9 @@ public:
// Virtual memory size in bytes.
long _rss; // %ld
- // Resident Set Size: number of pages the process has in real memory, minus 3 for administrative purposes. This is just the pages which
- // count towards text, data, or stack space. This does not include pages which have not been demand-loaded in, or which are swapped out
+ // Resident Set Size: number of pages the process has in real memory, minus 3 for administrative
+ // purposes. This is just the pages which count towards text, data, or stack space. This
+ // does not include pages which have not been demand-loaded in, or which are swapped out
unsigned long _rss_rlim; // %lu
// Current limit in bytes on the rss of the process (usually 4294967295 on i386).
@@ -314,7 +322,8 @@ public:
}
// try known flat-text file locations
- // format: Slackware-x86_64 13.0, Red Hat Enterprise Linux Server release 5.6 (Tikanga), etc.
+ // format: Slackware-x86_64 13.0, Red Hat Enterprise Linux Server release 5.6 (Tikanga),
+ // etc.
typedef vector<string> pathvec;
pathvec paths;
pathvec::const_iterator i;
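The fields documented above come straight from /proc/<pid>/stat (see man 5 proc); a minimal
read of the first few looks roughly like the sketch below -- a hypothetical helper with
simplified error handling.

#include <cstdio>

struct ProcStat {
    int pid;
    char comm[128];
    char state;
    int ppid;
};

bool readProcStat(int pid, ProcStat* out) {
    char path[64];
    std::snprintf(path, sizeof(path), "/proc/%d/stat", pid);
    FILE* f = std::fopen(path, "r");
    if (!f)
        return false;
    // comm is "(name)"; %127s stops at whitespace, so this sketch assumes
    // a space-free executable name.
    int n = std::fscanf(f, "%d %127s %c %d", &out->pid, out->comm, &out->state, &out->ppid);
    std::fclose(f);
    return n == 4;
}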
diff --git a/src/mongo/util/processinfo_windows.cpp b/src/mongo/util/processinfo_windows.cpp
index 0dff58dd791..224cd5a896a 100644
--- a/src/mongo/util/processinfo_windows.cpp
+++ b/src/mongo/util/processinfo_windows.cpp
@@ -437,7 +437,9 @@ bool ProcessInfo::blockInMemory(const void* start) {
if (bstat) {
for (int i=0; i<30; i++) {
if (wiex[i].BasicInfo.FaultingPc == 0) break;
- cout << "faulting pc = " << wiex[i].BasicInfo.FaultingPc << " address = " << wiex[i].BasicInfo.FaultingVa << " thread id = " << wiex[i].FaultingThreadId << endl;
+ cout << "faulting pc = " << wiex[i].BasicInfo.FaultingPc <<
+ " address = " << wiex[i].BasicInfo.FaultingVa <<
+ " thread id = " << wiex[i].FaultingThreadId << endl;
}
}
#endif
diff --git a/src/mongo/util/progress_meter.h b/src/mongo/util/progress_meter.h
index a9672462534..50042bfadcc 100644
--- a/src/mongo/util/progress_meter.h
+++ b/src/mongo/util/progress_meter.h
@@ -127,8 +127,8 @@ private:
// e.g.:
// CurOp * op = CurOp::get(txn);
-// ProgressMeterHolder pm(op->setMessage("index: (1/3) external sort", "Index: External Sort Progress", d->stats.nrecords, 10));
-// loop { pm.hit(); }
+// ProgressMeterHolder pm(op->setMessage("index: (1/3) external sort",
+// "Index: External Sort Progress", d->stats.nrecords, 10)); loop { pm.hit(); }
class ProgressMeterHolder {
MONGO_DISALLOW_COPYING(ProgressMeterHolder);
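Expanding the usage comment above into a fuller sketch; the loop condition and work-item names
are hypothetical, with setMessage's arguments taken from the comment.

CurOp* op = CurOp::get(txn);
ProgressMeterHolder pm(op->setMessage(
    "index: (1/3) external sort", "Index: External Sort Progress", d->stats.nrecords, 10));
while (hasMoreRecords()) {  // hypothetical loop condition
    sortOneRecord();        // hypothetical work item
    pm.hit();               // advance the meter once per record
}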
diff --git a/src/mongo/util/safe_num.cpp b/src/mongo/util/safe_num.cpp
index 1350fca1c78..c29422cf81b 100644
--- a/src/mongo/util/safe_num.cpp
+++ b/src/mongo/util/safe_num.cpp
@@ -172,7 +172,9 @@ SafeNum addInt32Int32(int lInt32, int rInt32) {
// NOTE: Please see "Secure Coding in C and C++", Second Edition, page 264-265 for
    // details on this algorithm (for an alternative resource, see
//
- // https://www.securecoding.cert.org/confluence/display/seccode/INT32-C.+Ensure+that+operations+on+signed+integers+do+not+result+in+overflow?showComments=false).
+ // https://www.securecoding.cert.org/confluence/display/seccode/
+ // INT32-C.+Ensure+that+operations+on+signed+integers+do+not+result+in+overflow?
+ // showComments=false).
//
// We are using the "Downcast from a larger type" algorithm here. We always perform
// the arithmetic in 64-bit mode, which can never overflow for 32-bit
@@ -214,7 +216,9 @@ SafeNum mulInt32Int32(int lInt32, int rInt32) {
// NOTE: Please see "Secure Coding in C and C++", Second Edition, page 264-265 for
    // details on this algorithm (for an alternative resource, see
//
- // https://www.securecoding.cert.org/confluence/display/seccode/INT32-C.+Ensure+that+operations+on+signed+integers+do+not+result+in+overflow?showComments=false).
+ // https://www.securecoding.cert.org/confluence/display/seccode/
+ // INT32-C.+Ensure+that+operations+on+signed+integers+do+not+result+in+overflow?
+ // showComments=false).
//
// We are using the "Downcast from a larger type" algorithm here. We always perform
// the arithmetic in 64-bit mode, which can never overflow for 32-bit
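The "downcast from a larger type" approach both NOTEs refer to boils down to doing the
arithmetic in 64 bits and range-checking the result. A minimal sketch with a hypothetical
helper is below; addition is shown, and multiplication works the same way since a 64-bit
product of two 32-bit values cannot overflow.

#include <cstdint>
#include <limits>
#include <utility>

// Returns {ok, sum}; ok is false when lhs + rhs would overflow int32_t.
std::pair<bool, std::int32_t> addInt32Checked(std::int32_t lhs, std::int32_t rhs) {
    std::int64_t wide = static_cast<std::int64_t>(lhs) + rhs;  // never overflows int64
    if (wide < std::numeric_limits<std::int32_t>::min() ||
        wide > std::numeric_limits<std::int32_t>::max())
        return std::make_pair(false, 0);  // would overflow int32
    return std::make_pair(true, static_cast<std::int32_t>(wide));
}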
diff --git a/src/mongo/util/startup_test.h b/src/mongo/util/startup_test.h
index fd1b220e1d9..05bf5b57775 100644
--- a/src/mongo/util/startup_test.h
+++ b/src/mongo/util/startup_test.h
@@ -33,15 +33,16 @@
namespace mongo {
-/* The idea here is to let all initialization of global variables (classes inheriting from StartupTest)
- complete before we run the tests -- otherwise order of initilization being arbitrary may mess
- us up. The app's main() function should call runTests().
+/*
+ The idea here is to let all initialization of global variables (classes inheriting from
+   StartupTest) complete before we run the tests -- otherwise order of initialization being arbitrary
+ may mess us up. The app's main() function should call runTests().
- To define a unit test, inherit from this and implement run. instantiate one object for the new class
- as a global.
+   To define a unit test, inherit from this and implement run. Instantiate one object for the new
+ class as a global.
- These tests are ran on *every* startup of mongod, so they have to be very lightweight. But it is a
- good quick check for a bad build.
+   These tests are run on *every* startup of mongod, so they have to be very lightweight. But it is
+ a good quick check for a bad build.
*/
class StartupTest {
public:
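Following the comment's recipe -- inherit, implement run, instantiate a global -- a hypothetical
lightweight test would look like this; the check itself is illustrative only.

#include <cstdlib>

class BuildSanityTest : public StartupTest {
    virtual void run() {
        // Must stay very lightweight: this runs on *every* mongod startup.
        if (sizeof(long long) != 8)
            std::abort();  // a bad build; fail fast
    }
} buildSanityTest;  // the global instance is what registers the test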
diff --git a/src/mongo/util/winutil.h b/src/mongo/util/winutil.h
index 57e93730867..5d4b727b111 100644
--- a/src/mongo/util/winutil.h
+++ b/src/mongo/util/winutil.h
@@ -48,7 +48,8 @@ inline std::string GetWinErrMsg(DWORD err) {
NULL);
std::string errMsgStr = toUtf8String(errMsg);
::LocalFree(errMsg);
- // FormatMessage() appends a newline to the end of error messages, we trim it because std::endl flushes the buffer.
+    // FormatMessage() appends a trailing CRLF to error messages; we trim it because std::endl
+ // flushes the buffer.
errMsgStr = errMsgStr.erase(errMsgStr.length() - 2);
std::ostringstream output;
output << errMsgStr << " (" << err << ")";
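Worth noting: the erase(length() - 2) above assumes exactly one trailing CRLF. A more defensive
variant (a hypothetical helper, not part of this patch) would strip however many trailing CR/LF
characters are present:

#include <string>

void rtrimCrLf(std::string& s) {
    std::string::size_type end = s.find_last_not_of("\r\n");
    s.erase(end == std::string::npos ? 0 : end + 1);
}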