author    Mark Benvenuto <mark.benvenuto@mongodb.com>    2015-07-28 19:55:13 -0400
committer Mark Benvenuto <mark.benvenuto@mongodb.com>    2015-07-29 17:23:36 -0400
commit    05c1738b852d3c4c309d001e5b4c9b1c06220904
tree      c1460a5fb5eb2e6a922b390da3f8cb3a9ab9648e
parent    b66e993f1c742518d9b5e93b0d8a5f8255a4127c
SERVER-18978: Clang-Format - Fix comment word wrapping indentation
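
The rewrapped comments below follow clang-format's line-length rules: comment lines that
overran the column limit were split and re-indented so the formatter's output stays stable.
A minimal sketch of the kind of .clang-format settings involved, with hypothetical values
rather than the repository's actual configuration:

    # Hypothetical .clang-format excerpt. The hunks below are consistent with
    # a 100-column limit and 4-space indentation.
    ColumnLimit: 100
    IndentWidth: 4
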
-rw-r--r--  src/mongo/bson/bsonelement.cpp | 3
-rw-r--r--  src/mongo/bson/bsonelement.h | 3
-rw-r--r--  src/mongo/bson/bsonmisc.h | 6
-rw-r--r--  src/mongo/bson/bsonobj.cpp | 12
-rw-r--r--  src/mongo/bson/bsonobj.h | 24
-rw-r--r--  src/mongo/bson/bsonobjbuilder.h | 14
-rw-r--r--  src/mongo/bson/bsonobjiterator.h | 6
-rw-r--r--  src/mongo/bson/util/builder.h | 13
-rw-r--r--  src/mongo/client/cyrus_sasl_client_session.cpp | 4
-rw-r--r--  src/mongo/client/dbclient.cpp | 13
-rw-r--r--  src/mongo/client/dbclient_rs.h | 6
-rw-r--r--  src/mongo/client/dbclientinterface.h | 110
-rw-r--r--  src/mongo/client/examples/mongoperf.cpp | 8
-rw-r--r--  src/mongo/client/parallel.cpp | 41
-rw-r--r--  src/mongo/crypto/tom/tomcrypt_cfg.h | 9
-rw-r--r--  src/mongo/crypto/tom/tomcrypt_custom.h | 3
-rw-r--r--  src/mongo/crypto/tom/tomcrypt_hash.h | 3
-rw-r--r--  src/mongo/db/auth/role_graph_builtin_roles.cpp | 31
-rw-r--r--  src/mongo/db/client.cpp | 3
-rw-r--r--  src/mongo/db/commands/index_filter_commands_test.cpp | 3
-rw-r--r--  src/mongo/db/commands/mr.cpp | 6
-rw-r--r--  src/mongo/db/db.cpp | 3
-rw-r--r--  src/mongo/db/dbcommands.cpp | 6
-rw-r--r--  src/mongo/db/dbcommands_generic.cpp | 84
-rw-r--r--  src/mongo/db/dbhelpers.h | 4
-rw-r--r--  src/mongo/db/dbmessage.h | 4
-rw-r--r--  src/mongo/db/geo/s2.h | 4
-rw-r--r--  src/mongo/db/matcher/expression_leaf.cpp | 3
-rw-r--r--  src/mongo/db/mongod_options.cpp | 4
-rw-r--r--  src/mongo/db/namespace_string.h | 3
-rw-r--r--  src/mongo/db/prefetch.cpp | 9
-rw-r--r--  src/mongo/db/query/get_executor.cpp | 4
-rw-r--r--  src/mongo/db/query/planner_access.h | 4
-rw-r--r--  src/mongo/db/query/query_planner_test.cpp | 12
-rw-r--r--  src/mongo/db/repl/bgsync.cpp | 3
-rw-r--r--  src/mongo/db/repl/master_slave.cpp | 25
-rw-r--r--  src/mongo/db/repl/master_slave.h | 11
-rw-r--r--  src/mongo/db/repl/oplog.cpp | 15
-rw-r--r--  src/mongo/db/repl/repl_settings.h | 3
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp | 2
-rw-r--r--  src/mongo/db/repl/replication_info.cpp | 3
-rw-r--r--  src/mongo/db/repl/scoped_conn.h | 7
-rw-r--r--  src/mongo/db/storage/mmap_v1/aligned_builder.cpp | 4
-rw-r--r--  src/mongo/db/storage/mmap_v1/btree/btree_logic_test.cpp | 8
-rw-r--r--  src/mongo/db/storage/mmap_v1/btree/key.cpp | 6
-rw-r--r--  src/mongo/db/storage/mmap_v1/btree/key.h | 5
-rw-r--r--  src/mongo/db/storage/mmap_v1/catalog/namespace.h | 10
-rw-r--r--  src/mongo/db/storage/mmap_v1/catalog/namespace_details.h | 4
-rw-r--r--  src/mongo/db/storage/mmap_v1/data_file.h | 5
-rw-r--r--  src/mongo/db/storage/mmap_v1/diskloc.h | 12
-rw-r--r--  src/mongo/db/storage/mmap_v1/dur.cpp | 25
-rw-r--r--  src/mongo/db/storage/mmap_v1/dur_journal.cpp | 8
-rw-r--r--  src/mongo/db/storage/mmap_v1/dur_journalformat.h | 28
-rw-r--r--  src/mongo/db/storage/mmap_v1/dur_journalimpl.h | 4
-rw-r--r--  src/mongo/db/storage/mmap_v1/dur_preplogbuffer.cpp | 4
-rw-r--r--  src/mongo/db/storage/mmap_v1/dur_recover.cpp | 6
-rw-r--r--  src/mongo/db/storage/mmap_v1/dur_stats.h | 5
-rw-r--r--  src/mongo/db/storage/mmap_v1/durable_mapped_file.cpp | 11
-rw-r--r--  src/mongo/db/storage/mmap_v1/durable_mapped_file.h | 9
-rw-r--r--  src/mongo/db/storage/mmap_v1/durop.cpp | 6
-rw-r--r--  src/mongo/db/storage/mmap_v1/durop.h | 6
-rw-r--r--  src/mongo/db/storage/mmap_v1/extent.h | 3
-rw-r--r--  src/mongo/db/storage/mmap_v1/journal_latency_test_cmd.cpp | 3
-rw-r--r--  src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp | 4
-rw-r--r--  src/mongo/db/storage/mmap_v1/record.h | 9
-rw-r--r--  src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp | 3
-rw-r--r--  src/mongo/dbtests/chunktests.cpp | 4
-rw-r--r--  src/mongo/dbtests/jsobjtests.cpp | 8
-rw-r--r--  src/mongo/dbtests/jstests.cpp | 4
-rw-r--r--  src/mongo/dbtests/namespacetests.cpp | 690
-rw-r--r--  src/mongo/dbtests/perftests.cpp | 16
-rw-r--r--  src/mongo/dbtests/query_stage_and.cpp | 4
-rw-r--r--  src/mongo/dbtests/sharding.cpp | 25
-rw-r--r--  src/mongo/dbtests/threadedtests.cpp | 23
-rw-r--r--  src/mongo/s/balance.cpp | 7
-rw-r--r--  src/mongo/s/balance.h | 23
-rw-r--r--  src/mongo/s/chunk.cpp | 18
-rw-r--r--  src/mongo/s/chunk.h | 9
-rw-r--r--  src/mongo/s/chunk_diff-inl.h | 3
-rw-r--r--  src/mongo/s/commands_admin.cpp | 6
-rw-r--r--  src/mongo/s/commands_public.cpp | 18
-rw-r--r--  src/mongo/s/config.cpp | 33
-rw-r--r--  src/mongo/s/config.h | 6
-rw-r--r--  src/mongo/s/d_migrate.cpp | 27
-rw-r--r--  src/mongo/s/d_split.cpp | 42
-rw-r--r--  src/mongo/s/d_state.h | 4
-rw-r--r--  src/mongo/s/distlock.cpp | 96
-rw-r--r--  src/mongo/s/distlock.h | 16
-rw-r--r--  src/mongo/s/grid.cpp | 14
-rw-r--r--  src/mongo/s/grid.h | 3
-rw-r--r--  src/mongo/s/server.cpp | 3
-rw-r--r--  src/mongo/s/shard.cpp | 8
-rw-r--r--  src/mongo/s/type_changelog.h | 4
-rw-r--r--  src/mongo/scripting/bson_template_evaluator.h | 3
-rw-r--r--  src/mongo/scripting/bson_template_evaluator_test.cpp | 3
-rw-r--r--  src/mongo/shell/linenoise.cpp | 59
-rw-r--r--  src/mongo/shell/linenoise_utf8.cpp | 17
-rw-r--r--  src/mongo/shell/linenoise_utf8.h | 34
-rw-r--r--  src/mongo/shell/shell_utils_launcher.cpp | 5
-rw-r--r--  src/mongo/util/assert_util.h | 6
-rw-r--r--  src/mongo/util/background.h | 4
-rw-r--r--  src/mongo/util/concurrency/value.h | 3
-rw-r--r--  src/mongo/util/debugger.cpp | 3
-rw-r--r--  src/mongo/util/descriptive_stats-inl.h | 9
-rw-r--r--  src/mongo/util/file_allocator.cpp | 3
-rw-r--r--  src/mongo/util/logfile.h | 3
-rw-r--r--  src/mongo/util/mmap.h | 3
-rw-r--r--  src/mongo/util/mmap_win.cpp | 3
-rw-r--r--  src/mongo/util/moveablebuffer.h | 6
-rw-r--r--  src/mongo/util/net/listen.h | 3
-rw-r--r--  src/mongo/util/net/message.h | 6
-rw-r--r--  src/mongo/util/net/message_port.h | 4
-rw-r--r--  src/mongo/util/net/miniwebserver.h | 17
-rw-r--r--  src/mongo/util/ntservice.cpp | 15
-rw-r--r--  src/mongo/util/options_parser/option_description.h | 5
-rw-r--r--  src/mongo/util/options_parser/options_parser.h | 3
-rw-r--r--  src/mongo/util/processinfo_linux2.cpp | 31
-rw-r--r--  src/mongo/util/processinfo_win32.cpp | 5
-rw-r--r--  src/mongo/util/progress_meter.h | 4
-rw-r--r--  src/mongo/util/ptr.h | 3
-rw-r--r--  src/mongo/util/queue.h | 3
-rw-r--r--  src/mongo/util/safe_num.cpp | 8
-rw-r--r--  src/mongo/util/startup_test.h | 11
-rw-r--r--  src/mongo/util/winutil.h | 3
124 files changed, 1168 insertions, 953 deletions
diff --git a/src/mongo/bson/bsonelement.cpp b/src/mongo/bson/bsonelement.cpp
index d22efdcb699..edc248948ea 100644
--- a/src/mongo/bson/bsonelement.cpp
+++ b/src/mongo/bson/bsonelement.cpp
@@ -841,7 +841,8 @@ int compareElementValues(const BSONElement& l, const BSONElement& r) {
case Bool:
return *l.value() - *r.value();
case Timestamp:
- // unsigned compare for timestamps - note they are not really dates but (ordinal + time_t)
+ // unsigned compare for timestamps - note they are not really dates
+ // but (ordinal + time_t)
if (l.date() < r.date())
return -1;
return l.date() == r.date() ? 0 : 1;
diff --git a/src/mongo/bson/bsonelement.h b/src/mongo/bson/bsonelement.h
index 0a8270c3d08..94767b88dba 100644
--- a/src/mongo/bson/bsonelement.h
+++ b/src/mongo/bson/bsonelement.h
@@ -663,7 +663,8 @@ inline double BSONElement::numberDouble() const {
}
}
-/** Retrieve int value for the element safely. Zero returned if not a number. Converted to int if another numeric type. */
+/** Retrieve int value for the element safely. Zero returned if not a number.
+ * Converted to int if another numeric type. */
inline int BSONElement::numberInt() const {
switch (type()) {
case NumberDouble:
diff --git a/src/mongo/bson/bsonmisc.h b/src/mongo/bson/bsonmisc.h
index e8e85df7860..cbca88fecb0 100644
--- a/src/mongo/bson/bsonmisc.h
+++ b/src/mongo/bson/bsonmisc.h
@@ -85,7 +85,8 @@ enum FieldCompareResult {
/** Use BSON_ARRAY macro like BSON macro, but without keys
- BSONArray arr = BSON_ARRAY( "hello" << 1 << BSON( "foo" << BSON_ARRAY( "bar" << "baz" << "qux" ) ) );
+ BSONArray arr =
+ BSON_ARRAY( "hello" << 1 << BSON( "foo" << BSON_ARRAY( "bar" << "baz" << "qux" ) ) );
*/
#define BSON_ARRAY(x) ((::mongo::BSONArrayBuilder() << x).arr())
@@ -264,7 +265,8 @@ private:
};
/**
- used in conjuction with BSONObjBuilder, allows for proper buffer size to prevent crazy memory usage
+ used in conjuction with BSONObjBuilder, allows for proper buffer size to prevent crazy memory
+ usage
*/
class BSONSizeTracker {
public:
diff --git a/src/mongo/bson/bsonobj.cpp b/src/mongo/bson/bsonobj.cpp
index 1cbb6a7be1e..7ce9a53e875 100644
--- a/src/mongo/bson/bsonobj.cpp
+++ b/src/mongo/bson/bsonobj.cpp
@@ -179,12 +179,12 @@ int BSONObj::woCompare(const BSONObj& r, const BSONObj& idxKey, bool considerFie
int x;
/*
- if( ordered && o.type() == String && strcmp(o.valuestr(), "ascii-proto") == 0 &&
- l.type() == String && r.type() == String ) {
- // note: no negative support yet, as this is just sort of a POC
- x = _stricmp(l.valuestr(), r.valuestr());
- }
- else*/ {
+ if( ordered && o.type() == String && strcmp(o.valuestr(), "ascii-proto") == 0 &&
+ l.type() == String && r.type() == String ) {
+ // note: no negative support yet, as this is just sort of a POC
+ x = _stricmp(l.valuestr(), r.valuestr());
+ }
+ else*/ {
x = l.woCompare(r, considerFieldName);
if (ordered && o.number() < 0)
x = -x;
diff --git a/src/mongo/bson/bsonobj.h b/src/mongo/bson/bsonobj.h
index eaa4147d5ca..742e1cdd9c2 100644
--- a/src/mongo/bson/bsonobj.h
+++ b/src/mongo/bson/bsonobj.h
@@ -143,22 +143,22 @@ public:
A BSONObj can use a buffer it "owns" or one it does not.
OWNED CASE
- If the BSONObj owns the buffer, the buffer can be shared among several BSONObj's (by assignment).
- In this case the buffer is basically implemented as a shared_ptr.
+ If the BSONObj owns the buffer, the buffer can be shared among several BSONObj's (by
+ assignment). In this case the buffer is basically implemented as a shared_ptr.
Since BSONObj's are typically immutable, this works well.
UNOWNED CASE
- A BSONObj can also point to BSON data in some other data structure it does not "own" or free later.
- For example, in a memory mapped file. In this case, it is important the original data stays in
- scope for as long as the BSONObj is in use. If you think the original data may go out of scope,
- call BSONObj::getOwned() to promote your BSONObj to having its own copy.
+ A BSONObj can also point to BSON data in some other data structure it does not "own" or free
+ later. For example, in a memory mapped file. In this case, it is important the original data
+ stays in scope for as long as the BSONObj is in use. If you think the original data may go
+ out of scope, call BSONObj::getOwned() to promote your BSONObj to having its own copy.
On a BSONObj assignment, if the source is unowned, both the source and dest will have unowned
pointers to the original buffer after the assignment.
If you are not sure about ownership but need the buffer to last as long as the BSONObj, call
- getOwned(). getOwned() is a no-op if the buffer is already owned. If not already owned, a malloc
- and memcpy will result.
+ getOwned(). getOwned() is a no-op if the buffer is already owned. If not already owned, a
+ malloc and memcpy will result.
Most ways to create BSONObj's create 'owned' variants. Unowned versions can be created with:
(1) specifying true for the ifree parameter in the constructor
@@ -240,8 +240,8 @@ public:
/** Get several fields at once. This is faster than separate getField() calls as the size of
elements iterated can then be calculated only once each.
@param n number of fieldNames, and number of elements in the fields array
- @param fields if a field is found its element is stored in its corresponding position in this array.
- if not found the array element is unchanged.
+ @param fields if a field is found its element is stored in its corresponding position in
+ this array. if not found the array element is unchanged.
*/
void getFields(unsigned n, const char** fieldNames, BSONElement* fields) const;
@@ -459,8 +459,8 @@ public:
return BSONElement(objdata() + 4);
}
- /** faster than firstElement().fieldName() - for the first element we can easily find the fieldname without
- computing the element size.
+ /** faster than firstElement().fieldName() - for the first element we can easily find the
+ * fieldname without computing the element size.
*/
const char* firstElementFieldName() const {
const char* p = objdata() + 4;
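
The ownership comment rewrapped above is worth a concrete illustration. A minimal sketch using
the real BSONObj::getOwned() API on a hypothetical externally owned buffer:

    #include "mongo/bson/bsonobj.h"

    // Sketch: promote an unowned BSONObj before its backing buffer goes away.
    // BSONObj(const char*) aliases the caller's memory without copying;
    // getOwned() is a no-op if already owned, otherwise it mallocs and memcpys.
    mongo::BSONObj makeSafeCopy(const char* externalBsonData) {
        mongo::BSONObj view(externalBsonData);  // unowned view into caller memory
        return view.getOwned();                 // owned copy, safe after the buffer is freed
    }
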
diff --git a/src/mongo/bson/bsonobjbuilder.h b/src/mongo/bson/bsonobjbuilder.h
index ff72c46be5c..ebdf798b807 100644
--- a/src/mongo/bson/bsonobjbuilder.h
+++ b/src/mongo/bson/bsonobjbuilder.h
@@ -78,7 +78,8 @@ public:
}
/** @param baseBuilder construct a BSONObjBuilder using an existing BufBuilder
- * This is for more efficient adding of subobjects/arrays. See docs for subobjStart for example.
+ * This is for more efficient adding of subobjects/arrays. See docs for
+ * subobjStart for example.
*/
BSONObjBuilder(BufBuilder& baseBuilder)
: _b(baseBuilder),
@@ -129,16 +130,16 @@ public:
/** append element to the object we are building */
BSONObjBuilder& append(const BSONElement& e) {
- verify(
- !e.eoo()); // do not append eoo, that would corrupt us. the builder auto appends when done() is called.
+ // do not append eoo, that would corrupt us. the builder auto appends when done() is called.
+ verify(!e.eoo());
_b.appendBuf((void*)e.rawdata(), e.size());
return *this;
}
/** append an element but with a new name */
BSONObjBuilder& appendAs(const BSONElement& e, const StringData& fieldName) {
- verify(
- !e.eoo()); // do not append eoo, that would corrupt us. the builder auto appends when done() is called.
+ // do not append eoo, that would corrupt us. the builder auto appends when done() is called.
+ verify(!e.eoo());
_b.appendNum((char)e.type());
_b.appendStr(fieldName);
_b.appendBuf((void*)e.value(), e.valuesize());
@@ -709,7 +710,8 @@ public:
return false;
}
- /** @return true if we are using our own bufbuilder, and not an alternate that was given to us in our constructor */
+ /** @return true if we are using our own bufbuilder, and not an alternate
+ * that was given to us in our constructor */
bool owned() const {
return &_b == &_buf;
}
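
The append()/appendAs() contract above (never append an EOO element; done() adds it) looks like
this in use. A minimal sketch with a hypothetical source object:

    #include "mongo/bson/bsonobjbuilder.h"

    // Sketch: build an object, copying one element under a new name. Assumes
    // 'src' has an "age" field, otherwise appendAs()'s eoo check would fire.
    mongo::BSONObj renameAge(const mongo::BSONObj& src) {
        mongo::BSONObjBuilder b;
        b.append("kind", "example");      // plain field append
        b.appendAs(src["age"], "years");  // same element value, new field name
        return b.obj();                   // finalizes; appends the EOO byte itself
    }
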
diff --git a/src/mongo/bson/bsonobjiterator.h b/src/mongo/bson/bsonobjiterator.h
index 402cd244e28..dfd3326aedf 100644
--- a/src/mongo/bson/bsonobjiterator.h
+++ b/src/mongo/bson/bsonobjiterator.h
@@ -70,12 +70,14 @@ public:
return _pos < _theend;
}
- /** @return true if more elements exist to be enumerated INCLUDING the EOO element which is always at the end. */
+ /** @return true if more elements exist to be enumerated INCLUDING the EOO element which is
+ * always at the end. */
bool moreWithEOO() {
return _pos <= _theend;
}
- /** @return the next element in the object. For the final element, element.eoo() will be true. */
+ /** @return the next element in the object. For the final element, element.eoo() will be true.
+ * */
BSONElement next(bool checkEnd) {
verify(_pos <= _theend);
diff --git a/src/mongo/bson/util/builder.h b/src/mongo/bson/util/builder.h
index fd55b090d16..46c0bd2c52c 100644
--- a/src/mongo/bson/util/builder.h
+++ b/src/mongo/bson/util/builder.h
@@ -45,9 +45,9 @@
namespace mongo {
/* Accessing unaligned doubles on ARM generates an alignment trap and aborts with SIGBUS on Linux.
- Wrapping the double in a packed struct forces gcc to generate code that works with unaligned values too.
- The generated code for other architectures (which already allow unaligned accesses) is the same as if
- there was a direct pointer access.
+ Wrapping the double in a packed struct forces gcc to generate code that works with unaligned
+ values too. The generated code for other architectures (which already allow unaligned accesses)
+ is the same as if there was a direct pointer access.
*/
struct PackedDouble {
double d;
@@ -57,8 +57,8 @@ struct PackedDouble {
/* Note the limit here is rather arbitrary and is simply a standard. generally the code works
with any object that fits in ram.
- Also note that the server has some basic checks to enforce this limit but those checks are not exhaustive
- for example need to check for size too big after
+ Also note that the server has some basic checks to enforce this limit but those checks are not
+ exhaustive for example need to check for size too big after
update $push (append) operation
various db.eval() type operations
*/
@@ -164,7 +164,8 @@ public:
}
/** leave room for some stuff later
- @return point to region that was skipped. pointer may change later (on realloc), so for immediate use only
+ @return point to region that was skipped. pointer may change later (on realloc), so for
+ immediate use only
*/
char* skip(int n) {
return grow(n);
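
The PackedDouble comment above describes the portable way to read a double at an arbitrary byte
offset. A minimal sketch of that pattern, using GCC/Clang packed-attribute syntax as an
assumption about how the struct is declared:

    // Sketch: unaligned double read without a SIGBUS on strict-alignment ARM.
    // The packed struct forces the compiler to emit byte-safe access code; on
    // x86 the generated code matches a direct pointer dereference.
    struct PackedDoubleSketch {
        double d;
    } __attribute__((packed));

    inline double readUnalignedDouble(const void* p) {
        return reinterpret_cast<const PackedDoubleSketch*>(p)->d;
    }
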
diff --git a/src/mongo/client/cyrus_sasl_client_session.cpp b/src/mongo/client/cyrus_sasl_client_session.cpp
index 7dc6a06c49a..6402c6f7ec3 100644
--- a/src/mongo/client/cyrus_sasl_client_session.cpp
+++ b/src/mongo/client/cyrus_sasl_client_session.cpp
@@ -149,8 +149,8 @@ MONGO_INITIALIZER_WITH_PREREQUISITES(CyrusSaslClientContext,
}
/**
- * Callback registered on the sasl_conn_t underlying a CyrusSaslClientSession to allow the Cyrus SASL
- * library to query for the authentication id and other simple string configuration parameters.
+ * Callback registered on the sasl_conn_t underlying a CyrusSaslClientSession to allow the Cyrus
+ * SASL library to query for the authentication id and other simple string configuration parameters.
*
* Note that in Mongo, the authentication and authorization ids (authid and authzid) are always
* the same. These correspond to SASL_CB_AUTHNAME and SASL_CB_USER.
diff --git a/src/mongo/client/dbclient.cpp b/src/mongo/client/dbclient.cpp
index 43641d11e87..745bf5ce582 100644
--- a/src/mongo/client/dbclient.cpp
+++ b/src/mongo/client/dbclient.cpp
@@ -979,8 +979,8 @@ bool DBClientWithCommands::exists(const string& ns) {
void DBClientConnection::_auth(const BSONObj& params) {
if (autoReconnect) {
- /* note we remember the auth info before we attempt to auth -- if the connection is broken, we will
- then have it for the next autoreconnect attempt.
+ /* note we remember the auth info before we attempt to auth -- if the connection is broken,
+ * we will then have it for the next autoreconnect attempt.
*/
authCache[params[saslCommandUserDBFieldName].str()] = params.getOwned();
}
@@ -988,7 +988,8 @@ void DBClientConnection::_auth(const BSONObj& params) {
DBClientBase::_auth(params);
}
-/** query N objects from the database into an array. makes sense mostly when you want a small number of results. if a huge number, use
+/** query N objects from the database into an array. makes sense mostly when you want a small
+ * number of results. if a huge number, use
query() and iterate the cursor.
*/
void DBClientInterface::findN(vector<BSONObj>& out,
@@ -1702,9 +1703,9 @@ bool DBClientConnection::_lazyKillCursor = true;
bool serverAlive(const string& uri) {
- DBClientConnection c(
- false,
- 20); // potentially the connection to server could fail while we're checking if it's alive - so use timeouts
+ // potentially the connection to server could fail while we're checking if it's alive - so use
+ // timeouts
+ DBClientConnection c(false, 20);
string err;
if (!c.connect(HostAndPort(uri), err))
return false;
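
The findN() helper whose comment is rewrapped above fetches a small, bounded result set in one
shot. A minimal usage sketch; 'conn' is assumed to be an already-connected DBClientConnection,
and the namespace and filter are hypothetical:

    // Sketch: pull at most 10 matching documents into a vector, then print them.
    std::vector<mongo::BSONObj> out;
    conn.findN(out, "test.users", mongo::Query("{active: true}"), 10);
    for (size_t i = 0; i < out.size(); i++) {
        std::cout << out[i].toString() << std::endl;
    }
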
diff --git a/src/mongo/client/dbclient_rs.h b/src/mongo/client/dbclient_rs.h
index 97d12791aa5..66eb965c286 100644
--- a/src/mongo/client/dbclient_rs.h
+++ b/src/mongo/client/dbclient_rs.h
@@ -58,7 +58,8 @@ public:
using DBClientBase::update;
using DBClientBase::remove;
- /** Call connect() after constructing. autoReconnect is always on for DBClientReplicaSet connections. */
+ /** Call connect() after constructing. autoReconnect is always on for DBClientReplicaSet
+ * connections. */
DBClientReplicaSet(const std::string& name,
const std::vector<HostAndPort>& servers,
double so_timeout = 0);
@@ -137,7 +138,8 @@ public:
bool* retry = NULL,
std::string* targetHost = NULL);
- /* this is the callback from our underlying connections to notify us that we got a "not master" error.
+ /* this is the callback from our underlying connections to notify us that we got a "not master"
+ * error.
*/
void isntMaster();
diff --git a/src/mongo/client/dbclientinterface.h b/src/mongo/client/dbclientinterface.h
index 4d05649b14b..a246abe5b99 100644
--- a/src/mongo/client/dbclientinterface.h
+++ b/src/mongo/client/dbclientinterface.h
@@ -50,13 +50,13 @@ namespace mongo {
/** the query field 'options' can have these bits set: */
enum MONGO_CLIENT_API QueryOptions {
- /** Tailable means cursor is not closed when the last data is retrieved. rather, the cursor marks
- the final object's position. you can resume using the cursor later, from where it was located,
- if more data were received. Set on dbQuery and dbGetMore.
+ /** Tailable means cursor is not closed when the last data is retrieved. rather, the cursor
+ * marks the final object's position. you can resume using the cursor later, from where it was
+ located, if more data were received. Set on dbQuery and dbGetMore.
like any "latent cursor", the cursor may become invalid at some point -- for example if that
- final object it references were deleted. Thus, you should be prepared to requery if you get back
- ResultFlag_CursorNotFound.
+ final object it references were deleted. Thus, you should be prepared to requery if you get
+ back ResultFlag_CursorNotFound.
*/
QueryOption_CursorTailable = 1 << 1,
@@ -74,21 +74,24 @@ enum MONGO_CLIENT_API QueryOptions {
// an extended period of time.
QueryOption_OplogReplay = 1 << 3,
- /** The server normally times out idle cursors after an inactivity period to prevent excess memory uses
+ /** The server normally times out idle cursors after an inactivity period to prevent excess
+ * memory uses
Set this option to prevent that.
*/
QueryOption_NoCursorTimeout = 1 << 4,
- /** Use with QueryOption_CursorTailable. If we are at the end of the data, block for a while rather
- than returning no data. After a timeout period, we do return as normal.
+ /** Use with QueryOption_CursorTailable. If we are at the end of the data, block for a while
+ * rather than returning no data. After a timeout period, we do return as normal.
*/
QueryOption_AwaitData = 1 << 5,
- /** Stream the data down full blast in multiple "more" packages, on the assumption that the client
- will fully read all data queried. Faster when you are pulling a lot of data and know you want to
- pull it all down. Note: it is not allowed to not read all the data unless you close the connection.
+ /** Stream the data down full blast in multiple "more" packages, on the assumption that the
+ * client will fully read all data queried. Faster when you are pulling a lot of data and know
+ * you want to pull it all down. Note: it is not allowed to not read all the data unless you
+ * close the connection.
- Use the query( stdx::function<void(const BSONObj&)> f, ... ) version of the connection's query()
+ Use the query( stdx::function<void(const BSONObj&)> f, ... ) version of the connection's
+ query()
method, and it will take care of all the details for you.
*/
QueryOption_Exhaust = 1 << 6,
@@ -367,7 +370,8 @@ class ScopedDbConnection;
class DBClientCursor;
class DBClientCursorBatchIterator;
-/** Represents a Mongo query expression. Typically one uses the QUERY(...) macro to construct a Query object.
+/** Represents a Mongo query expression. Typically one uses the QUERY(...) macro to construct a
+ * Query object.
Examples:
QUERY( "age" << 33 << "school" << "UCLA" ).sort("name")
QUERY( "age" << GT << 30 << LT << 50 )
@@ -385,7 +389,8 @@ public:
Query(const char* json);
/** Add a sort (ORDER BY) criteria to the query expression.
- @param sortPattern the sort order template. For example to order by name ascending, time descending:
+ @param sortPattern the sort order template. For example to order by name ascending, time
+ descending:
{ name : 1, ts : -1 }
i.e.
BSON( "name" << 1 << "ts" << -1 )
@@ -421,14 +426,16 @@ public:
*/
Query& maxKey(const BSONObj& val);
- /** Return explain information about execution of this query instead of the actual query results.
+ /** Return explain information about execution of this query instead of the actual query
+ * results.
Normally it is easier to use the mongo shell to run db.find(...).explain().
*/
Query& explain();
- /** Use snapshot mode for the query. Snapshot mode assures no duplicates are returned, or objects missed, which were
- present at both the start and end of the query's execution (if an object is new during the query, or deleted during
- the query, it may or may not be returned, even with snapshot mode).
+ /** Use snapshot mode for the query. Snapshot mode assures no duplicates are returned, or
+ * objects missed, which were present at both the start and end of the query's execution (if an
+ * object is new during the query, or deleted during the query, it may or may not be returned,
+ * even with snapshot mode).
Note that short query responses (less than 1MB) are always effectively snapshotted.
@@ -603,7 +610,8 @@ MONGO_CLIENT_API std::string nsGetCollection(const std::string& ns);
class MONGO_CLIENT_API DBConnector {
public:
virtual ~DBConnector() {}
- /** actualServer is set to the actual server where they call went if there was a choice (SlaveOk) */
+ /** actualServer is set to the actual server where they call went if there was a choice
+ * (SlaveOk) */
virtual bool call(Message& toSend,
Message& response,
bool assertOk = true,
@@ -668,7 +676,8 @@ public:
const BSONObj* fieldsToReturn = 0,
int queryOptions = 0);
- /** query N objects from the database into an array. makes sense mostly when you want a small number of results. if a huge number, use
+ /** query N objects from the database into an array. makes sense mostly when you want a small
+ * number of results. if a huge number, use
query() and iterate the cursor.
*/
void findN(std::vector<BSONObj>& out,
@@ -718,8 +727,8 @@ public:
@param dbname database name. Use "admin" for global administrative commands.
@param cmd the command object to execute. For example, { ismaster : 1 }
- @param info the result object the database returns. Typically has { ok : ..., errmsg : ... } fields
- set.
+ @param info the result object the database returns. Typically has { ok : ..., errmsg : ... }
+ fields set.
@param options see enum QueryOptions - normally not needed to run a command
@param auth if set, the BSONObj representation will be appended to the command object sent
@@ -758,9 +767,10 @@ public:
/** Authorize access to a particular database.
Authentication is separate for each database on the server -- you may authenticate for any
number of databases on a single connection.
- The "admin" database is special and once authenticated provides access to all databases on the
- server.
- @param digestPassword if password is plain text, set this to true. otherwise assumed to be pre-digested
+ The "admin" database is special and once authenticated provides access to all databases on
+ the server.
+ @param digestPassword if password is plain text, set this to true. otherwise assumed
+ to be pre-digested
@param[out] authLevel level of authentication for the given user
@return true if successful
*/
@@ -802,8 +812,8 @@ public:
virtual bool isMaster(bool& isMaster, BSONObj* info = 0);
/**
- Create a new collection in the database. Normally, collection creation is automatic. You would
- use this function if you wish to specify special options on creation.
+ Create a new collection in the database. Normally, collection creation is automatic. You
+ would use this function if you wish to specify special options on creation.
If the collection already exists, no action occurs.
@@ -891,8 +901,8 @@ public:
return res;
}
- /** Perform a repair and compaction of the specified database. May take a long time to run. Disk space
- must be available equal to the size of the database while repairing.
+ /** Perform a repair and compaction of the specified database. May take a long time to run.
+ * Disk space must be available equal to the size of the database while repairing.
*/
bool repairDatabase(const std::string& dbname, BSONObj* info = 0) {
return simpleCommand(dbname, info, "repairDatabase");
@@ -903,8 +913,9 @@ public:
Generally, you should dropDatabase() first as otherwise the copied information will MERGE
into whatever data is already present in this database.
- For security reasons this function only works when you are authorized to access the "admin" db. However,
- if you have access to said db, you can copy any database from one place to another.
+ For security reasons this function only works when you are authorized to access the "admin"
+ db. However, if you have access to said db, you can copy any database from one place to
+ another.
TODO: this needs enhancement to be more flexible in terms of security.
This method provides a way to "rename" a database by copying it to a new db name and
@@ -922,9 +933,9 @@ public:
const std::string& fromhost = "",
BSONObj* info = 0);
- /** The Mongo database provides built-in performance profiling capabilities. Uset setDbProfilingLevel()
- to enable. Profiling information is then written to the system.profile collection, which one can
- then query.
+ /** The Mongo database provides built-in performance profiling capabilities. Uset
+ * setDbProfilingLevel() to enable. Profiling information is then written to the system.profile
+ * collection, which one can then query.
*/
enum ProfilingLevel {
ProfileOff = 0,
@@ -978,15 +989,15 @@ public:
MROutput output = MRInline);
/** Run javascript code on the database server.
- dbname database SavedContext in which the code runs. The javascript variable 'db' will be assigned
- to this database when the function is invoked.
+ dbname database SavedContext in which the code runs. The javascript variable 'db' will be
+ assigned to this database when the function is invoked.
jscode source code for a javascript function.
- info the command object which contains any information on the invocation result including
- the return value and other information. If an error occurs running the jscode, error
- information will be in info. (try "log() << info.toString()")
+ info the command object which contains any information on the invocation result
+ including the return value and other information. If an error occurs running the
+ jscode, error information will be in info. (try "log() << info.toString()")
retValue return value from the jscode function.
- args args to pass to the jscode function. when invoked, the 'args' variable will be defined
- for use by the jscode.
+ args args to pass to the jscode function. when invoked, the 'args' variable will be
+ defined for use by the jscode.
returns true if runs ok.
@@ -1021,7 +1032,8 @@ public:
return eval(dbname, jscode, info, retValue, &args);
}
- /** eval invocation with one parm to server and one numeric field (either int or double) returned */
+ /** eval invocation with one parm to server and one numeric field (either int or double)
+ * returned */
template <class T, class NumType>
bool eval(const std::string& dbname, const std::string& jscode, T parm1, NumType& ret) {
BSONObj info;
@@ -1063,7 +1075,8 @@ public:
@param ns collection to be indexed
@param keys the "key pattern" for the index. e.g., { name : 1 }
@param unique if true, indicates that key uniqueness should be enforced for this index
- @param name if not specified, it will be created from the keys automatically (which is recommended)
+ @param name if not specified, it will be created from the keys automatically (which is
+ recommended)
@param cache if set to false, the index cache for the connection won't remember this call
@param background build index in the background (see mongodb docs for details)
@param v index version. leave at default value. (unit tests set this parameter.)
@@ -1080,7 +1093,8 @@ public:
int v = -1,
int ttl = 0);
/**
- clears the index cache, so the subsequent call to ensureIndex for any index will go to the server
+ clears the index cache, so the subsequent call to ensureIndex for any index will go to the
+ server
*/
virtual void resetIndexCache();
@@ -1232,7 +1246,8 @@ public:
to specify a sort order.
@param nToReturn n to return (i.e., limit). 0 = unlimited
@param nToSkip start with the nth item
- @param fieldsToReturn optional template of which fields to select. if unspecified, returns all fields
+ @param fieldsToReturn optional template of which fields to select. if unspecified, returns all
+ fields
@param queryOptions see options enum at top of this file
@return cursor. 0 if error (connection failure)
@@ -1314,7 +1329,7 @@ public:
virtual void killCursor(long long cursorID) = 0;
virtual bool callRead(Message& toSend, Message& response) = 0;
- // virtual bool callWrite( Message& toSend , Message& response ) = 0; // TODO: add this if needed
+ // virtual bool callWrite( Message& toSend , Message& response ) = 0; //TODO: add this if needed
virtual ConnectionString::ConnectionType type() const = 0;
@@ -1371,7 +1386,8 @@ public:
If autoReconnect is true, you can try to use the DBClientConnection even when
false was returned -- it will try to connect again.
- @param serverHostname host to connect to. can include port number ( 127.0.0.1 , 127.0.0.1:5555 )
+ @param serverHostname host to connect to. can include port number ( 127.0.0.1 ,
+ 127.0.0.1:5555 )
*/
void connect(const std::string& serverHostname) {
std::string errmsg;
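
The QueryOption flags documented above combine as a bitmask argument to query(). A minimal
sketch of the tailable-cursor pattern the comments describe; 'conn' is a hypothetical connected
client, and the capped-collection namespace is an assumption:

    // Sketch: tailable cursor that blocks briefly at end-of-data instead of
    // returning immediately (QueryOption_AwaitData). Per the comment above, be
    // prepared to requery if the server reports ResultFlag_CursorNotFound.
    std::auto_ptr<mongo::DBClientCursor> cur =
        conn.query("local.oplog.$main",
                   mongo::Query(),
                   0, 0, 0,
                   mongo::QueryOption_CursorTailable | mongo::QueryOption_AwaitData);
    while (cur->more()) {
        mongo::BSONObj doc = cur->next();
        // process doc...
    }
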
diff --git a/src/mongo/client/examples/mongoperf.cpp b/src/mongo/client/examples/mongoperf.cpp
index 327cfdb114d..8d982a91ef4 100644
--- a/src/mongo/client/examples/mongoperf.cpp
+++ b/src/mongo/client/examples/mongoperf.cpp
@@ -69,7 +69,8 @@ unsigned long long len; // file len
const unsigned PG = 4096;
unsigned nThreadsRunning = 0;
-// as this is incremented A LOT, at some point this becomes a bottleneck if very high ops/second (in cache) things are happening.
+// as this is incremented A LOT, at some point this becomes a bottleneck if very high ops/second (in
+// cache) things are happening.
AtomicUInt32 iops;
SimpleMutex m("mperf");
@@ -177,8 +178,9 @@ void go() {
return;
}
lf = new LogFile(fname, true);
- const unsigned sz = 1024 * 1024 *
- 32; // needs to be big as we are using synchronousAppend. if we used a regular MongoFile it wouldn't have to be
+ // needs to be big as we are using synchronousAppend. if we used a regular MongoFile it
+ // wouldn't have to be
+ const unsigned sz = 1024 * 1024 * 32;
char* buf = (char*)mongoMalloc(sz + 4096);
const char* p = round(buf);
for (unsigned long long i = 0; i < len; i += sz) {
diff --git a/src/mongo/client/parallel.cpp b/src/mongo/client/parallel.cpp
index 96f0c0c76c6..176f7b9a108 100644
--- a/src/mongo/client/parallel.cpp
+++ b/src/mongo/client/parallel.cpp
@@ -751,14 +751,15 @@ void ParallelSortClusteredCursor::startInit() {
_qSpec.options(), // options
// NtoReturn is weird.
// If zero, it means use default size, so we do that for all cursors
- // If positive, it's the batch size (we don't want this cursor limiting results), that's
- // done at a higher level
- // If negative, it's the batch size, but we don't create a cursor - so we don't want
- // to create a child cursor either.
- // Either way, if non-zero, we want to pull back the batch size + the skip amount as
- // quickly as possible. Potentially, for a cursor on a single shard or if we keep better track of
- // chunks, we can actually add the skip value into the cursor and/or make some assumptions about the
- // return value size ( (batch size + skip amount) / num_servers ).
+ // If positive, it's the batch size (we don't want this cursor limiting
+ // results), that's done at a higher level
+ // If negative, it's the batch size, but we don't create a cursor - so we
+ // don't want to create a child cursor either.
+ // Either way, if non-zero, we want to pull back the batch size + the skip
+ // amount as quickly as possible. Potentially, for a cursor on a single
+ // shard or if we keep better track of chunks, we can actually add the skip
+ // value into the cursor and/or make some assumptions about the return value
+ // size ( (batch size + skip amount) / num_servers ).
_qSpec.ntoreturn() == 0 ? 0 : (_qSpec.ntoreturn() > 0
? _qSpec.ntoreturn() + _qSpec.ntoskip()
: _qSpec.ntoreturn() -
@@ -816,7 +817,8 @@ void ParallelSortClusteredCursor::startInit() {
<< (lazyInit ? "(lazily) " : "(full) ") << "on shard " << shard
<< ", current connection state is " << mdata.toBSON() << endl;
} catch (StaleConfigException& e) {
- // Our version isn't compatible with the current version anymore on at least one shard, need to retry immediately
+ // Our version isn't compatible with the current version anymore on at least one shard,
+ // need to retry immediately
NamespaceString staleNS(e.getns());
// For legacy reasons, this may not be set in the exception :-(
@@ -1202,7 +1204,8 @@ void ParallelSortClusteredCursor::_oldInit() {
vector<shared_ptr<ShardConnection>> conns;
vector<string> servers;
- // Since we may get all sorts of errors, record them all as they come and throw them later if necessary
+ // Since we may get all sorts of errors, record them all as they come and throw them later if
+ // necessary
vector<string> staleConfigExs;
vector<string> socketExs;
vector<string> otherExs;
@@ -1211,7 +1214,8 @@ void ParallelSortClusteredCursor::_oldInit() {
int retries = -1;
// Loop through all the queries until we've finished or gotten a socket exception on all of them
- // We break early for non-socket exceptions, and socket exceptions if we aren't returning partial results
+ // We break early for non-socket exceptions, and socket exceptions if we aren't returning
+ // partial results
do {
retries++;
@@ -1246,7 +1250,8 @@ void ParallelSortClusteredCursor::_oldInit() {
string errLoc = " @ " + sq._server;
if (firstPass) {
- // This may be the first time connecting to this shard, if so we can get an error here
+ // This may be the first time connecting to this shard, if so we can get an error
+ // here
try {
conns.push_back(
shared_ptr<ShardConnection>(new ShardConnection(sq._server, _ns)));
@@ -1308,10 +1313,10 @@ void ParallelSortClusteredCursor::_oldInit() {
}
}
- // Go through all the potentially started cursors and finish initializing them or log any errors and
- // potentially retry
- // TODO: Better error classification would make this easier, errors are indicated in all sorts of ways
- // here that we need to trap.
+ // Go through all the potentially started cursors and finish initializing them or log any
+ // errors and potentially retry
+ // TODO: Better error classification would make this easier, errors are indicated in all
+ // sorts of ways here that we need to trap.
for (size_t i = 0; i < num; i++) {
// log() << "Finishing query for " << cons[i].get()->getHost() << endl;
string errLoc = " @ " + queries[i]._server;
@@ -1647,8 +1652,8 @@ bool Future::CommandResult::join(int maxRetries) {
}
}
- // We may not always have a collection, since we don't know from a generic command what collection
- // is supposed to be acted on, if any
+ // We may not always have a collection, since we don't know from a generic command what
+ // collection is supposed to be acted on, if any
if (nsGetCollection(staleNS).size() == 0) {
warning() << "no collection namespace in stale config exception "
<< "for lazy command " << _cmd << ", could not refresh " << staleNS
diff --git a/src/mongo/crypto/tom/tomcrypt_cfg.h b/src/mongo/crypto/tom/tomcrypt_cfg.h
index daae2890d67..c599bab88ca 100644
--- a/src/mongo/crypto/tom/tomcrypt_cfg.h
+++ b/src/mongo/crypto/tom/tomcrypt_cfg.h
@@ -64,11 +64,12 @@ LTC_EXPORT int LTC_CALL XSTRCMP(const char* s1, const char* s2);
#define ARGTYPE 0
#endif
-/* Controls endianess and size of registers. Leave uncommented to get platform neutral [slower] code
+/* Controls endianess and size of registers. Leave uncommented to get platform neutral [slower]
+ * code
*
- * Note: in order to use the optimized macros your platform must support unaligned 32 and 64 bit read/writes.
- * The x86 platforms allow this but some others [ARM for instance] do not. On those platforms you **MUST**
- * use the portable [slower] macros.
+ * Note: in order to use the optimized macros your platform must support unaligned 32 and 64 bit
+ * read/writes. The x86 platforms allow this but some others [ARM for instance] do not. On those
+ * platforms you **MUST** use the portable [slower] macros.
*/
/* detect x86-32 machines somewhat */
diff --git a/src/mongo/crypto/tom/tomcrypt_custom.h b/src/mongo/crypto/tom/tomcrypt_custom.h
index 07d64fc83f6..9d64e630010 100644
--- a/src/mongo/crypto/tom/tomcrypt_custom.h
+++ b/src/mongo/crypto/tom/tomcrypt_custom.h
@@ -410,7 +410,8 @@
/* Debuggers */
-/* define this if you use Valgrind, note: it CHANGES the way SOBER-128 and LTC_RC4 work (see the code) */
+/* define this if you use Valgrind, note: it CHANGES the way SOBER-128 and LTC_RC4 work (see the
+ * code) */
/* #define LTC_VALGRIND */
#endif
diff --git a/src/mongo/crypto/tom/tomcrypt_hash.h b/src/mongo/crypto/tom/tomcrypt_hash.h
index 7060353d4c2..db3cd46c152 100644
--- a/src/mongo/crypto/tom/tomcrypt_hash.h
+++ b/src/mongo/crypto/tom/tomcrypt_hash.h
@@ -199,7 +199,8 @@ extern struct ltc_hash_descriptor {
*/
int (*test)(void);
- /* accelerated hmac callback: if you need to-do multiple packets just use the generic hmac_memory and provide a hash callback */
+ /* accelerated hmac callback: if you need to-do multiple packets just use the generic
+ * hmac_memory and provide a hash callback */
int (*hmac_block)(const unsigned char* key,
unsigned long keylen,
const unsigned char* in,
diff --git a/src/mongo/db/auth/role_graph_builtin_roles.cpp b/src/mongo/db/auth/role_graph_builtin_roles.cpp
index 81f06ce0fe0..f86a2981d0a 100644
--- a/src/mongo/db/auth/role_graph_builtin_roles.cpp
+++ b/src/mongo/db/auth/role_graph_builtin_roles.cpp
@@ -131,20 +131,23 @@ MONGO_INITIALIZER(AuthorizationBuiltinRoles)(InitializerContext* context) {
// DB admin role
- dbAdminRoleActions
- << ActionType::collMod << ActionType::collStats // clusterMonitor gets this also
- << ActionType::compact << ActionType::convertToCapped // read_write gets this also
- << ActionType::createCollection // read_write gets this also
- << ActionType::dbStats // clusterMonitor gets this also
- << ActionType::dropCollection
- << ActionType::
- dropDatabase // clusterAdmin gets this also TODO(spencer): should readWriteAnyDatabase?
- << ActionType::dropIndex << ActionType::createIndex << ActionType::indexStats
- << ActionType::enableProfiler << ActionType::listCollections << ActionType::listIndexes
- << ActionType::planCacheIndexFilter << ActionType::planCacheRead
- << ActionType::planCacheWrite << ActionType::reIndex
- << ActionType::renameCollectionSameDB // read_write gets this also
- << ActionType::repairDatabase << ActionType::storageDetails << ActionType::validate;
+ dbAdminRoleActions << ActionType::collMod
+ << ActionType::collStats // clusterMonitor gets this also
+ << ActionType::compact
+ << ActionType::convertToCapped // read_write gets this also
+ << ActionType::createCollection // read_write gets this also
+ << ActionType::dbStats // clusterMonitor gets this also
+ << ActionType::dropCollection
+ // clusterAdmin gets this also TODO(spencer): should readWriteAnyDatabase?
+ << ActionType::dropDatabase << ActionType::dropIndex
+ << ActionType::createIndex << ActionType::indexStats
+ << ActionType::enableProfiler << ActionType::listCollections
+ << ActionType::listIndexes << ActionType::planCacheIndexFilter
+ << ActionType::planCacheRead << ActionType::planCacheWrite
+ << ActionType::reIndex
+ << ActionType::renameCollectionSameDB // read_write gets this also
+ << ActionType::repairDatabase << ActionType::storageDetails
+ << ActionType::validate;
// clusterMonitor role actions that target the cluster resource
clusterMonitorRoleClusterActions
diff --git a/src/mongo/db/client.cpp b/src/mongo/db/client.cpp
index 32931f81652..b357c317c33 100644
--- a/src/mongo/db/client.cpp
+++ b/src/mongo/db/client.cpp
@@ -280,7 +280,8 @@ Client::WriteContext::WriteContext(OperationContext* opCtx, const std::string& n
void Client::Context::checkNotStale() const {
switch (_client->_curOp->getOp()) {
case dbGetMore: // getMore's are special and should be handled else where
- case dbUpdate: // update & delete check shard version in instance.cpp, so don't check here as well
+ // update & delete check shard version in instance.cpp, so don't check here as well
+ case dbUpdate:
case dbDelete:
break;
default: { ensureShardVersionOKOrThrow(_ns); }
diff --git a/src/mongo/db/commands/index_filter_commands_test.cpp b/src/mongo/db/commands/index_filter_commands_test.cpp
index d55cd57aaad..b4a032522cf 100644
--- a/src/mongo/db/commands/index_filter_commands_test.cpp
+++ b/src/mongo/db/commands/index_filter_commands_test.cpp
@@ -159,7 +159,8 @@ bool planCacheContains(const PlanCache& planCache,
PlanCacheEntry* entry = *i;
// Canonicalizing query shape in cache entry to get cache key.
- // Alternatively, we could add key to PlanCacheEntry but that would be used in one place only.
+ // Alternatively, we could add key to PlanCacheEntry but that would be used in one place
+ // only.
ASSERT_OK(
CanonicalQuery::canonicalize(ns, entry->query, entry->sort, entry->projection, &cqRaw));
scoped_ptr<CanonicalQuery> currentQuery(cqRaw);
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index fd4e72bd7cd..716f832a1ed 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -758,7 +758,8 @@ void State::init() {
_scope->invoke(init, 0, 0, 0, true);
// js function to run reduce on all keys
- // redfunc = _scope->createFunction("for (var key in hashmap) { print('Key is ' + key); list = hashmap[key]; ret = reduce(key, list); print('Value is ' + ret); };");
+ // redfunc = _scope->createFunction("for (var key in hashmap) { print('Key is ' + key); list =
+ // hashmap[key]; ret = reduce(key, list); print('Value is ' + ret); };");
_reduceAll = _scope->createFunction(
"var map = _mrMap;"
"var list, ret;"
@@ -1570,7 +1571,8 @@ public:
}
// fetch result from other shards 1 chunk at a time
- // it would be better to do just one big $or query, but then the sorting would not be efficient
+ // it would be better to do just one big $or query, but then the sorting would not be
+ // efficient
string shardName = shardingState.getShardName();
DBConfigPtr confOut = grid.getDBConfig(dbname, false);
diff --git a/src/mongo/db/db.cpp b/src/mongo/db/db.cpp
index ce95ef707d6..18e3f4fa383 100644
--- a/src/mongo/db/db.cpp
+++ b/src/mongo/db/db.cpp
@@ -144,7 +144,8 @@ Timer startupSrandTimer;
QueryResult::View emptyMoreResult(long long);
-/* todo: make this a real test. the stuff in dbtests/ seem to do all dbdirectclient which exhaust doesn't support yet. */
+/* todo: make this a real test. the stuff in dbtests/ seem to do all
+ * dbdirectclient which exhaust doesn't support yet. */
// QueryOption_Exhaust
#define TESTEXHAUST 0
#if (TESTEXHAUST)
diff --git a/src/mongo/db/dbcommands.cpp b/src/mongo/db/dbcommands.cpp
index 3f2241126c4..d9749c57b7c 100644
--- a/src/mongo/db/dbcommands.cpp
+++ b/src/mongo/db/dbcommands.cpp
@@ -1099,7 +1099,8 @@ public:
if (str::equals("collMod", e.fieldName())) {
// no-op
} else if (str::startsWith(e.fieldName(), "$")) {
- // no-op: ignore top-level fields prefixed with $. They are for the command processor.
+ // no-op: ignore top-level fields prefixed with $. They are for the command
+ // processor.
} else if (LiteParsedQuery::cmdOptionMaxTimeMS == e.fieldNameStringData()) {
// no-op
} else if (str::equals("index", e.fieldName())) {
@@ -1359,7 +1360,8 @@ bool _execCommand(OperationContext* txn,
LOG(1) << "command failed because of stale config, can retry" << causedBy(e) << endl;
throw;
} catch (DBException& e) {
- // TODO: Rethrown errors have issues here, should divorce SendStaleConfigException from the DBException tree
+ // TODO: Rethrown errors have issues here, should divorce SendStaleConfigException from the
+ // DBException tree
stringstream ss;
ss << "exception: " << e.what();
diff --git a/src/mongo/db/dbcommands_generic.cpp b/src/mongo/db/dbcommands_generic.cpp
index e9b21758842..482147903c3 100644
--- a/src/mongo/db/dbcommands_generic.cpp
+++ b/src/mongo/db/dbcommands_generic.cpp
@@ -72,56 +72,58 @@ using std::stringstream;
using std::vector;
#if 0
- namespace cloud {
- SimpleMutex mtx("cloud");
- Guarded< vector<string>, mtx > ips;
- bool startedThread = false;
-
- void thread() {
- bson::bo cmd;
- while( 1 ) {
- list<Target> L;
- {
- SimpleMutex::scoped_lock lk(mtx);
- if( ips.ref(lk).empty() )
- continue;
- for( unsigned i = 0; i < ips.ref(lk).size(); i++ ) {
- L.push_back( Target(ips.ref(lk)[i]) );
- }
+namespace cloud {
+ SimpleMutex mtx("cloud");
+ Guarded< vector<string>, mtx > ips;
+ bool startedThread = false;
+
+ void thread() {
+ bson::bo cmd;
+ while( 1 ) {
+ list<Target> L;
+ {
+ SimpleMutex::scoped_lock lk(mtx);
+ if( ips.ref(lk).empty() )
+ continue;
+ for( unsigned i = 0; i < ips.ref(lk).size(); i++ ) {
+ L.push_back( Target(ips.ref(lk)[i]) );
}
+ }
- /** repoll as machines might be down on the first lookup (only if not found previously) */
- sleepsecs(6);
- }
+ /** repoll as machines might be down on the first lookup (only if not found previously)
+ * */
+ sleepsecs(6);
}
}
+}
- class CmdCloud : public Command {
- public:
- CmdCloud() : Command( "cloud" ) { }
- virtual bool slaveOk() const { return true; }
- virtual bool adminOnly() const { return true; }
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual void help( stringstream &help ) const {
- help << "internal command facilitating running in certain cloud computing environments";
- }
- bool run(OperationContext* txn, const string& dbname, BSONObj& obj, int options, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
- if( !obj.hasElement("servers") ) {
- vector<string> ips;
- obj["servers"].Obj().Vals(ips);
- {
- SimpleMutex::scoped_lock lk(cloud::mtx);
- cloud::ips.ref(lk).swap(ips);
- if( !cloud::startedThread ) {
- cloud::startedThread = true;
- boost::thread thr(cloud::thread);
- }
+class CmdCloud : public Command {
+public:
+ CmdCloud() : Command( "cloud" ) { }
+ virtual bool slaveOk() const { return true; }
+ virtual bool adminOnly() const { return true; }
+ virtual bool isWriteCommandForConfigServer() const { return false; }
+ virtual void help( stringstream &help ) const {
+ help << "internal command facilitating running in certain cloud computing environments";
+ }
+ bool run(OperationContext* txn, const string& dbname, BSONObj& obj, int options,
+ string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
+ if( !obj.hasElement("servers") ) {
+ vector<string> ips;
+ obj["servers"].Obj().Vals(ips);
+ {
+ SimpleMutex::scoped_lock lk(cloud::mtx);
+ cloud::ips.ref(lk).swap(ips);
+ if( !cloud::startedThread ) {
+ cloud::startedThread = true;
+ boost::thread thr(cloud::thread);
}
}
- return true;
}
- } cmdCloud;
+ return true;
+ }
+} cmdCloud;
#endif
class CmdBuildInfo : public Command {
diff --git a/src/mongo/db/dbhelpers.h b/src/mongo/db/dbhelpers.h
index ed9b1144cd2..f7e1d4e776c 100644
--- a/src/mongo/db/dbhelpers.h
+++ b/src/mongo/db/dbhelpers.h
@@ -73,8 +73,8 @@ struct Helpers {
/* fetch a single object from collection ns that matches query.
set your db SavedContext first.
- @param query - the query to perform. note this is the low level portion of query so "orderby : ..."
- won't work.
+ @param query - the query to perform. note this is the low level portion of query so "orderby
+ : ..." won't work.
@param requireIndex if true, assert if no index for the query. a way to guard against
writing a slow query.
diff --git a/src/mongo/db/dbmessage.h b/src/mongo/db/dbmessage.h
index 7e17c6235b6..6eacae1ec8d 100644
--- a/src/mongo/db/dbmessage.h
+++ b/src/mongo/db/dbmessage.h
@@ -74,8 +74,8 @@ namespace mongo {
std::string collection;
int nToSkip;
int nToReturn; // how many you want back as the beginning of the cursor data (0=no limit)
- // greater than zero is simply a hint on how many objects to send back per "cursor batch".
- // a negative number indicates a hard limit.
+ // greater than zero is simply a hint on how many objects to send back per
+ // "cursor batch". a negative number indicates a hard limit.
JSObject query;
[JSObject fieldsToReturn]
dbGetMore:
diff --git a/src/mongo/db/geo/s2.h b/src/mongo/db/geo/s2.h
index 7a3a1c6a840..fecf9d22361 100644
--- a/src/mongo/db/geo/s2.h
+++ b/src/mongo/db/geo/s2.h
@@ -29,8 +29,8 @@
#pragma once
/*
- * This file's purpose is to confine the suppression of the Clang warning for mismatched-tags (struct vs class)
- * in only the s2.h file
+ * This file's purpose is to confine the suppression of the Clang warning for mismatched-tags
+ * (struct vs class) in only the s2.h file
*/
#ifdef __clang__
diff --git a/src/mongo/db/matcher/expression_leaf.cpp b/src/mongo/db/matcher/expression_leaf.cpp
index 8f3a1c97e1e..2316ef278b5 100644
--- a/src/mongo/db/matcher/expression_leaf.cpp
+++ b/src/mongo/db/matcher/expression_leaf.cpp
@@ -277,7 +277,8 @@ Status RegexMatchExpression::init(const StringData& path,
}
bool RegexMatchExpression::matchesSingleElement(const BSONElement& e) const {
- // log() << "RegexMatchExpression::matchesSingleElement _regex: " << _regex << " e: " << e << std::endl;
+ // log() << "RegexMatchExpression::matchesSingleElement _regex: " << _regex << " e: " << e <<
+ // std::endl;
switch (e.type()) {
case String:
case Symbol:
diff --git a/src/mongo/db/mongod_options.cpp b/src/mongo/db/mongod_options.cpp
index 6cbf51c817b..e570d376f1a 100644
--- a/src/mongo/db/mongod_options.cpp
+++ b/src/mongo/db/mongod_options.cpp
@@ -790,8 +790,8 @@ Status canonicalizeMongodOptions(moe::Environment* params) {
}
}
- // "storage.mmapv1.preallocDataFiles" comes from the config file, so override it if "noprealloc" is
- // set since that comes from the command line.
+ // "storage.mmapv1.preallocDataFiles" comes from the config file, so override it if "noprealloc"
+ // is set since that comes from the command line.
if (params->count("noprealloc")) {
Status ret = params->set("storage.mmapv1.preallocDataFiles",
moe::Value(!(*params)["noprealloc"].as<bool>()));
diff --git a/src/mongo/db/namespace_string.h b/src/mongo/db/namespace_string.h
index 3bce52d3899..8b59b0dfbdc 100644
--- a/src/mongo/db/namespace_string.h
+++ b/src/mongo/db/namespace_string.h
@@ -143,7 +143,8 @@ public:
NamespaceString getTargetNSForListIndexesGetMore() const;
/**
- * @return true if the namespace is valid. Special namespaces for internal use are considered as valid.
+ * @return true if the namespace is valid. Special namespaces for internal use are considered as
+ * valid.
*/
bool isValid() const {
return validDBName(db()) && !coll().empty();
diff --git a/src/mongo/db/prefetch.cpp b/src/mongo/db/prefetch.cpp
index e5b846fb3aa..47a486eb5f2 100644
--- a/src/mongo/db/prefetch.cpp
+++ b/src/mongo/db/prefetch.cpp
@@ -53,8 +53,8 @@ using std::string;
namespace repl {
namespace {
-// todo / idea: the prefetcher, when it fetches _id, on an upsert, will see if the record exists. if it does not,
-// at write time, we can just do an insert, which will be faster.
+// todo / idea: the prefetcher, when it fetches _id, on an upsert, will see if the record exists. if
+// it does not, at write time, we can just do an insert, which will be faster.
// The count (of batches) and time spent fetching pages before application
// -- meaning depends on the prefetch behavior: all, _id index, none, etc.)
@@ -69,8 +69,9 @@ void prefetchIndexPages(OperationContext* txn,
Collection* collection,
const BackgroundSync::IndexPrefetchConfig& prefetchConfig,
const BSONObj& obj) {
- // do we want prefetchConfig to be (1) as-is, (2) for update ops only, or (3) configured per op type?
- // One might want PREFETCH_NONE for updates, but it's more rare that it is a bad idea for inserts.
+ // do we want prefetchConfig to be (1) as-is, (2) for update ops only, or (3) configured per op
+ // type? One might want PREFETCH_NONE for updates, but it's more rare that it is a bad idea for
+ // inserts.
// #3 (per op), a big issue would be "too many knobs".
switch (prefetchConfig) {
case BackgroundSync::PREFETCH_NONE:
diff --git a/src/mongo/db/query/get_executor.cpp b/src/mongo/db/query/get_executor.cpp
index ade91aabbb9..1d2cf5164eb 100644
--- a/src/mongo/db/query/get_executor.cpp
+++ b/src/mongo/db/query/get_executor.cpp
@@ -883,8 +883,8 @@ std::string getProjectedDottedField(const std::string& field, bool* isIDOut) {
// Generate prefix of field up to (but not including) array index.
std::vector<std::string> prefixStrings(res);
prefixStrings.resize(i);
- // Reset projectedField. Instead of overwriting, joinStringDelim() appends joined string
- // to the end of projectedField.
+ // Reset projectedField. Instead of overwriting, joinStringDelim() appends joined
+ // string to the end of projectedField.
std::string projectedField;
mongo::joinStringDelim(prefixStrings, &projectedField, '.');
return projectedField;
diff --git a/src/mongo/db/query/planner_access.h b/src/mongo/db/query/planner_access.h
index 1503a3eecb0..885f0e7d7a6 100644
--- a/src/mongo/db/query/planner_access.h
+++ b/src/mongo/db/query/planner_access.h
@@ -218,8 +218,8 @@ public:
// a filter on the entire tree.
// 2. No fetches performed. There will be a final fetch by the caller of buildIndexedDataAccess
// who set the value of inArrayOperator to true.
- // 3. No compound indices are used and no bounds are combined. These are incorrect in the context
- // of these operators.
+ // 3. No compound indices are used and no bounds are combined. These are incorrect in the
+ // context of these operators.
//
/**
diff --git a/src/mongo/db/query/query_planner_test.cpp b/src/mongo/db/query/query_planner_test.cpp
index 59e66c5485f..65c76400089 100644
--- a/src/mongo/db/query/query_planner_test.cpp
+++ b/src/mongo/db/query/query_planner_test.cpp
@@ -2177,7 +2177,8 @@ TEST_F(QueryPlannerTest, ElemMatchNestedOrNotIndexed) {
//
// Geo
-// http://docs.mongodb.org/manual/reference/operator/query-geospatial/#geospatial-query-compatibility-chart
+// http://docs.mongodb.org/
+// manual/reference/operator/query-geospatial/#geospatial-query-compatibility-chart
//
TEST_F(QueryPlannerTest, Basic2DNonNear) {
@@ -3185,15 +3186,16 @@ TEST_F(QueryPlannerTest, NoMergeSortIfNoSortWanted) {
/*
TEST_F(QueryPlannerTest, SortOnGeoQuery) {
addIndex(BSON("timestamp" << -1 << "position" << "2dsphere"));
- BSONObj query = fromjson("{position: {$geoWithin: {$geometry: {type: \"Polygon\", coordinates: [[[1, 1], [1, 90], [180, 90], [180, 1], [1, 1]]]}}}}");
- BSONObj sort = fromjson("{timestamp: -1}");
+ BSONObj query = fromjson("{position: {$geoWithin: {$geometry: {type: \"Polygon\", coordinates:
+ [[[1, 1], [1, 90], [180, 90], [180, 1], [1, 1]]]}}}}"); BSONObj sort = fromjson("{timestamp:
+ -1}");
runQuerySortProj(query, sort, BSONObj());
ASSERT_EQUALS(getNumSolutions(), 2U);
assertSolutionExists("{sort: {pattern: {timestamp: -1}, limit: 0, "
"node: {cscan: {dir: 1}}}}");
- assertSolutionExists("{fetch: {node: {ixscan: {pattern: {timestamp: -1, position: '2dsphere'}}}}}");
-}
+ assertSolutionExists("{fetch: {node: {ixscan: {pattern: {timestamp: -1, position:
+ '2dsphere'}}}}}"); }
TEST_F(QueryPlannerTest, SortOnGeoQueryMultikey) {
// true means multikey
diff --git a/src/mongo/db/repl/bgsync.cpp b/src/mongo/db/repl/bgsync.cpp
index f1a2b36ef33..175c3a1fe9d 100644
--- a/src/mongo/db/repl/bgsync.cpp
+++ b/src/mongo/db/repl/bgsync.cpp
@@ -405,7 +405,8 @@ bool BackgroundSync::_rollbackIfNeeded(OperationContext* txn, OplogReader& r) {
syncRollback(txn, _replCoord->getMyLastOptime(), &r, _replCoord);
return true;
}
- /* we're not ahead? maybe our new query got fresher data. best to come back and try again */
+ /* we're not ahead? maybe our new query got fresher data. best to come back and try
+ * again */
log() << "replSet syncTail condition 1";
sleepsecs(1);
} catch (DBException& e) {
diff --git a/src/mongo/db/repl/master_slave.cpp b/src/mongo/db/repl/master_slave.cpp
index 8844ae7828d..7b3457008ba 100644
--- a/src/mongo/db/repl/master_slave.cpp
+++ b/src/mongo/db/repl/master_slave.cpp
@@ -28,11 +28,13 @@
/* Collections we use:
- local.sources - indicates what sources we pull from as a "slave", and the last update of each
+ local.sources - indicates what sources we pull from as a "slave", and the last update of
+ each
local.oplog.$main - our op log as "master"
local.dbinfo.<dbname> - no longer used???
- local.pair.startup - [deprecated] can contain a special value indicating for a pair that we have the master copy.
- used when replacing other half of the pair which has permanently failed.
+ local.pair.startup - [deprecated] can contain a special value indicating for a pair that we
+ have the master copy. used when replacing other half of the pair which
+ has permanently failed.
local.pair.sync - [deprecated] { initialsynccomplete: 1 }
*/
@@ -667,7 +669,8 @@ void ReplSource::_sync_pullOpLog_applyOperation(OperationContext* txn,
scoped_ptr<Lock::GlobalWrite> lk(alreadyLocked ? 0 : new Lock::GlobalWrite(txn->lockState()));
if (replAllDead) {
- // hmmm why is this check here and not at top of this function? does it get set between top and here?
+ // hmmm why is this check here and not at top of this function? does it get set between top
+ // and here?
log() << "replAllDead, throwing SyncException: " << replAllDead << endl;
throw SyncException();
}
@@ -926,7 +929,8 @@ int ReplSource::_sync_pullOpLog(OperationContext* txn, int& nApplied) {
verify(syncedTo < nextOpTime);
throw SyncException();
} else {
- /* t == syncedTo, so the first op was applied previously or it is the first op of initial query and need not be applied. */
+ /* t == syncedTo, so the first op was applied previously or it is the first op of
+ * initial query and need not be applied. */
}
}
@@ -1048,7 +1052,8 @@ int ReplSource::sync(OperationContext* txn, int& nApplied) {
}
nClonedThisPass = 0;
- // FIXME Handle cases where this db isn't on default port, or default port is spec'd in hostName.
+ // FIXME Handle cases where this db isn't on default port, or default port is spec'd in
+ // hostName.
if ((string("localhost") == hostName || string("127.0.0.1") == hostName) &&
serverGlobalParams.port == ServerGlobalParams::DefaultDBPort) {
log() << "repl: can't sync from self (localhost). sources configuration may be wrong."
@@ -1157,9 +1162,8 @@ static void replMain(OperationContext* txn) {
break;
}
}
- verify(
- syncing ==
- 0); // i.e., there is only one sync thread running. we will want to change/fix this.
+ // i.e., there is only one sync thread running. we will want to change/fix this.
+ verify(syncing == 0);
syncing++;
}
@@ -1337,7 +1341,8 @@ void pretouchN(vector<BSONObj>& v, unsigned a, unsigned b) {
void pretouchOperation(OperationContext* txn, const BSONObj& op) {
if (txn->lockState()->isWriteLocked()) {
- return; // no point pretouching if write locked. not sure if this will ever fire, but just in case.
+ // no point pretouching if write locked. not sure if this will ever fire, but just in case.
+ return;
}
const char* which = "o";
diff --git a/src/mongo/db/repl/master_slave.h b/src/mongo/db/repl/master_slave.h
index 74e509302f7..f0aee54e8d0 100644
--- a/src/mongo/db/repl/master_slave.h
+++ b/src/mongo/db/repl/master_slave.h
@@ -35,7 +35,8 @@
/* replication data overview
at the slave:
- local.sources { host: ..., source: ..., only: ..., syncedTo: ..., localLogTs: ..., dbsNextPass: { ... }, incompleteCloneDbs: { ... } }
+ local.sources { host: ..., source: ..., only: ..., syncedTo: ..., localLogTs: ..., dbsNextPass:
+ { ... }, incompleteCloneDbs: { ... } }
at the master:
local.oplog.$<source>
@@ -71,7 +72,8 @@ public:
Can be a group of things to replicate for several databases.
- { host: ..., source: ..., only: ..., syncedTo: ..., dbsNextPass: { ... }, incompleteCloneDbs: { ... } }
+ { host: ..., source: ..., only: ..., syncedTo: ..., dbsNextPass: { ... }, incompleteCloneDbs:
+ { ... } }
'source' defaults to 'main'; support for multiple source names is
not done (always use main for now).
@@ -137,8 +139,9 @@ public:
std::string sourceName() const {
return _sourceName.empty() ? "main" : _sourceName;
}
- std::string
- only; // only a certain db. note that in the sources collection, this may not be changed once you start replicating.
+ // only a certain db. note that in the sources collection, this may not be changed once you
+ // start replicating.
+ std::string only;
/* the last time point we have already synced up to (in the remote/master's oplog). */
OpTime syncedTo;
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index df2c04b8e22..1d131d0af1e 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -510,7 +510,8 @@ void createOplog(OperationContext* txn) {
// -------------------------------------
/** @param fromRepl false if from ApplyOpsCmd
- @return true if was and update should have happened and the document DNE. see replset initial sync code.
+ @return true if an update should have happened and the document DNE.
+ see replset initial sync code.
*/
bool applyOperation_inlock(OperationContext* txn,
Database* db,
@@ -602,8 +603,8 @@ bool applyOperation_inlock(OperationContext* txn,
<< "warning, repl doing slow updates (no _id field) for " << ns << endl;
}
} else {
- /* todo : it may be better to do an insert here, and then catch the dup key exception and do update
- then. very few upserts will not be inserts...
+ /* todo : it may be better to do an insert here, and then catch the dup key
+ * exception and do update then. very few upserts will not be inserts...
*/
BSONObjBuilder b;
b.append(_id);
@@ -648,8 +649,8 @@ bool applyOperation_inlock(OperationContext* txn,
log() << "replication failed to apply update: " << op.toString() << endl;
}
// need to check to see if it isn't present so we can set failedUpdate correctly.
- // note that adds some overhead for this extra check in some cases, such as an updateCriteria
- // of the form
+ // note that this adds some overhead for this extra check in some cases, such as an
+ // updateCriteria of the form
// { _id:..., { x : {$size:...} }
// thus this is not ideal.
else {
@@ -663,8 +664,8 @@ bool applyOperation_inlock(OperationContext* txn,
log() << "replication couldn't find doc: " << op.toString() << endl;
}
- // Otherwise, it's present; zero objects were updated because of additional specifiers
- // in the query for idempotence
+ // Otherwise, it's present; zero objects were updated because of additional
+ // specifiers in the query for idempotence
}
} else {
// this could happen benignly on an oplog duplicate replay of an upsert
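
Editor's note: the todo in the hunk above proposes an optimistic upsert — try the insert first and fall back to an update only when a duplicate-key error fires, since very few upserts are not inserts. A toy sketch of that control flow; DupKey, insertDoc, and updateDoc are hypothetical stand-ins for the real storage calls:

#include <iostream>
#include <set>
#include <stdexcept>

struct DupKey : std::runtime_error {
    DupKey() : std::runtime_error("duplicate key") {}
};

std::set<int> ids;  // stand-in for the collection's _id index

void insertDoc(int id) {
    if (!ids.insert(id).second) throw DupKey();  // insert fails on existing _id
}
void updateDoc(int id) { std::cout << "updated " << id << "\n"; }

// Optimistic upsert: insert on the fast path, update only on a dup key.
void upsert(int id) {
    try {
        insertDoc(id);   // common case: the document did not exist
    } catch (const DupKey&) {
        updateDoc(id);   // rare case: it did, so update instead
    }
}

int main() {
    upsert(7);  // takes the insert path
    upsert(7);  // dup key -> takes the update path
}
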
diff --git a/src/mongo/db/repl/repl_settings.h b/src/mongo/db/repl/repl_settings.h
index 5c1e6032acc..6ba6d9dbd44 100644
--- a/src/mongo/db/repl/repl_settings.h
+++ b/src/mongo/db/repl/repl_settings.h
@@ -51,7 +51,8 @@ class ReplSettings {
public:
SlaveTypes slave;
- /** true means we are master and doing replication. if we are not writing to oplog, this won't be true. */
+ /** true means we are master and doing replication. if we are not writing to oplog, this won't
+ * be true. */
bool master;
bool fastsync;
diff --git a/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp
index 079f6e2227a..b1114bd9d1f 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp
@@ -521,7 +521,7 @@ TEST_F(ReplCoordTest, ReconfigDuringHBReconfigFails) {
// net->exitNetwork();
// stopCapturingLogMessages();
// ASSERT_EQUALS(1,
-// countLogLinesContaining("because already in the midst of a configuration process"));
+// countLogLinesContaining("because already in the midst of a configuration process"));
// reconfigThread.join();
// logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Log());
// }
diff --git a/src/mongo/db/repl/replication_info.cpp b/src/mongo/db/repl/replication_info.cpp
index 72449c7c6a5..be4caa5e2bf 100644
--- a/src/mongo/db/repl/replication_info.cpp
+++ b/src/mongo/db/repl/replication_info.cpp
@@ -112,7 +112,8 @@ void appendReplicationInfo(OperationContext* txn, BSONObjBuilder& result, int le
if (level > 1) {
wassert(!txn->lockState()->isLocked());
- // note: there is no so-style timeout on this connection; perhaps we should have one.
+ // note: there is no so-style timeout on this connection; perhaps we should have
+ // one.
ScopedDbConnection conn(s["host"].valuestr());
DBClientConnection* cliConn = dynamic_cast<DBClientConnection*>(&conn.conn());
diff --git a/src/mongo/db/repl/scoped_conn.h b/src/mongo/db/repl/scoped_conn.h
index 85f6dd5080f..80cc1810250 100644
--- a/src/mongo/db/repl/scoped_conn.h
+++ b/src/mongo/db/repl/scoped_conn.h
@@ -78,9 +78,10 @@ public:
connInfo->setTimeout(timeout);
}
- /* If we were to run a query and not exhaust the cursor, future use of the connection would be problematic.
- So here what we do is wrapper known safe methods and not allow cursor-style queries at all. This makes
- ScopedConn limited in functionality but very safe. More non-cursor wrappers can be added here if needed.
+ /* If we were to run a query and not exhaust the cursor, future use of the connection would be
+ * problematic. So here what we do is wrap known safe methods and not allow cursor-style
+ * queries at all. This makes ScopedConn limited in functionality but very safe. More
+ * non-cursor wrappers can be added here if needed.
*/
bool runCommand(const std::string& dbname, const BSONObj& cmd, BSONObj& info, int options = 0) {
return conn()->runCommand(dbname, cmd, info, options);
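
Editor's note: the rewrapped comment describes a deliberate design — ScopedConn forwards only a whitelist of safe, cursor-free methods instead of exposing the underlying connection. A generic sketch of that delegation pattern (RawConn and SafeConn are illustrative, not the real classes):

#include <string>

struct RawConn {
    bool runCommand(const std::string&, const std::string&) { return true; }
    // ... cursor-returning query methods exist here but are deliberately
    // NOT forwarded by the wrapper below.
};

class SafeConn {
    RawConn* _conn;
public:
    explicit SafeConn(RawConn* c) : _conn(c) {}
    // forward only the known-safe subset; no cursor-style queries
    bool runCommand(const std::string& db, const std::string& cmd) {
        return _conn->runCommand(db, cmd);
    }
};

int main() {
    RawConn raw;
    SafeConn conn(&raw);
    return conn.runCommand("admin", "ping") ? 0 : 1;
}
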
diff --git a/src/mongo/db/storage/mmap_v1/aligned_builder.cpp b/src/mongo/db/storage/mmap_v1/aligned_builder.cpp
index 8742f25e285..bee3fb4f86a 100644
--- a/src/mongo/db/storage/mmap_v1/aligned_builder.cpp
+++ b/src/mongo/db/storage/mmap_v1/aligned_builder.cpp
@@ -134,8 +134,8 @@ void AlignedBuilder::_malloc(unsigned sz) {
_p._allocationAddress = p;
_p._data = (char*)p;
#elif defined(__linux__)
- // in theory #ifdef _POSIX_VERSION should work, but it doesn't on OS X 10.4, and needs to be tested on solaris.
- // so for now, linux only for this.
+ // in theory #ifdef _POSIX_VERSION should work, but it doesn't on OS X 10.4, and needs to be
+ // tested on solaris. so for now, linux only for this.
void* p = 0;
int res = posix_memalign(&p, Alignment, sz);
massert(13524, "out of memory AlignedBuilder", res == 0);
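
Editor's note: for reference, the Linux branch above relies on posix_memalign, which returns 0 on success and fills its first argument with suitably aligned memory. A standalone Linux/POSIX sketch; the 8KB Alignment value is assumed here for illustration:

#include <stdio.h>
#include <stdlib.h>

int main() {
    // Alignment must be a power of two and a multiple of sizeof(void*).
    const size_t Alignment = 8192;
    void* p = NULL;
    int res = posix_memalign(&p, Alignment, 1 << 20);  // 1MB, aligned
    if (res != 0) {
        fprintf(stderr, "out of memory\n");
        return 1;
    }
    printf("aligned: %d\n", (int)((size_t)p % Alignment == 0));  // prints 1
    free(p);
    return 0;
}
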
diff --git a/src/mongo/db/storage/mmap_v1/btree/btree_logic_test.cpp b/src/mongo/db/storage/mmap_v1/btree/btree_logic_test.cpp
index 317ab5919cd..55acad840af 100644
--- a/src/mongo/db/storage/mmap_v1/btree/btree_logic_test.cpp
+++ b/src/mongo/db/storage/mmap_v1/btree/btree_logic_test.cpp
@@ -2382,10 +2382,10 @@ public:
}
// too much work to try to make this happen through inserts and deletes
// we are intentionally manipulating the btree bucket directly here
- BtreeBucket::Loc* L = const_cast< BtreeBucket::Loc* >( &bt()->keyNode( 1 ).prevChildBucket );
- writing(L)->Null();
- writingInt( const_cast< BtreeBucket::Loc& >( bt()->keyNode( 1 ).recordLoc ).GETOFS() ) |= 1; // make unused
- BSONObj k = BSON( "a" << toInsert );
+ BtreeBucket::Loc* L = const_cast< BtreeBucket::Loc* >( &bt()->keyNode( 1
+ ).prevChildBucket ); writing(L)->Null();
+ writingInt( const_cast< BtreeBucket::Loc& >( bt()->keyNode( 1 ).recordLoc ).GETOFS() )
+ |= 1; // make unused BSONObj k = BSON( "a" << toInsert );
Base::insert( k );
}
};
diff --git a/src/mongo/db/storage/mmap_v1/btree/key.cpp b/src/mongo/db/storage/mmap_v1/btree/key.cpp
index baa934c525f..915171d4b0e 100644
--- a/src/mongo/db/storage/mmap_v1/btree/key.cpp
+++ b/src/mongo/db/storage/mmap_v1/btree/key.cpp
@@ -197,7 +197,8 @@ int KeyBson::woCompare(const KeyBson& r, const Ordering& o) const {
return oldCompare(_o, r._o, o);
}
-// woEqual could be made faster than woCompare but this is for backward compatibility so not worth a big effort
+// woEqual could be made faster than woCompare but this is for backward compatibility so not worth a
+// big effort
bool KeyBson::woEqual(const KeyBson& r) const {
return oldCompare(_o, r._o, nullOrdering) == 0;
}
@@ -495,7 +496,8 @@ static int compare(const unsigned char*& l, const unsigned char*& r) {
int llen = binDataCodeToLength(L);
int diff = L - R; // checks length and subtype simultaneously
if (diff) {
- // unfortunately nibbles are backwards to do subtype and len in one check (could bit swap...)
+ // unfortunately nibbles are backwards to do subtype and len in one check (could bit
+ // swap...)
int rlen = binDataCodeToLength(R);
if (llen != rlen)
return llen - rlen;
diff --git a/src/mongo/db/storage/mmap_v1/btree/key.h b/src/mongo/db/storage/mmap_v1/btree/key.h
index 4787d83281a..d6546a76d77 100644
--- a/src/mongo/db/storage/mmap_v1/btree/key.h
+++ b/src/mongo/db/storage/mmap_v1/btree/key.h
@@ -83,8 +83,9 @@ class KeyV1Owned;
class KeyV1 {
void operator=(
const KeyV1&); // disallowed just to make people be careful as we don't own the buffer
- KeyV1(
- const KeyV1Owned&); // disallowed as this is not a great idea as KeyV1Owned likely will go out of scope
+ // disallowed as this is not a great idea as KeyV1Owned likely will go out of scope
+ KeyV1(const KeyV1Owned&);
+
public:
KeyV1() {
_keyData = 0;
diff --git a/src/mongo/db/storage/mmap_v1/catalog/namespace.h b/src/mongo/db/storage/mmap_v1/catalog/namespace.h
index d24f576bb01..d854eeff989 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/namespace.h
+++ b/src/mongo/db/storage/mmap_v1/catalog/namespace.h
@@ -84,12 +84,12 @@ public:
return buf;
}
- /* NamespaceDetails::Extra was added after fact to allow chaining of data blocks to support more than 10 indexes
- (more than 10 IndexDetails). It's a bit hacky because of this late addition with backward
- file support. */
+ /* NamespaceDetails::Extra was added after the fact to allow chaining of data blocks to support more
+ * than 10 indexes (more than 10 IndexDetails). It's a bit hacky because of this late addition
+ * with backward file support. */
std::string extraName(int i) const;
- bool isExtra()
- const; /* ends with $extr... -- when true an extra block not a normal NamespaceDetails block */
+ /* ends with $extr... -- when true an extra block not a normal NamespaceDetails block */
+ bool isExtra() const;
enum MaxNsLenValue {
// Maximum possible length of name any namespace, including special ones like $extra.
diff --git a/src/mongo/db/storage/mmap_v1/catalog/namespace_details.h b/src/mongo/db/storage/mmap_v1/catalog/namespace_details.h
index 42ca38a36e2..1aee8f9ad1a 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/namespace_details.h
+++ b/src/mongo/db/storage/mmap_v1/catalog/namespace_details.h
@@ -97,8 +97,8 @@ public:
DiskLoc capExtent; // the "current" extent we're writing to for a capped collection
DiskLoc capFirstNewRecord;
- unsigned short
- _dataFileVersion; // NamespaceDetails version. So we can do backward compatibility in the future. See filever.h
+ // NamespaceDetails version. So we can do backward compatibility in the future. See filever.h
+ unsigned short _dataFileVersion;
unsigned short _indexFileVersion;
unsigned long long multiKeyIndexBits;
diff --git a/src/mongo/db/storage/mmap_v1/data_file.h b/src/mongo/db/storage/mmap_v1/data_file.h
index 64df4ebdd61..a78ac911c5b 100644
--- a/src/mongo/db/storage/mmap_v1/data_file.h
+++ b/src/mongo/db/storage/mmap_v1/data_file.h
@@ -126,8 +126,9 @@ class DataFileHeader {
public:
DataFileVersion version;
int fileLength;
- DiskLoc
- unused; /* unused is the portion of the file that doesn't belong to any allocated extents. -1 = no more */
+ /* unused is the portion of the file that doesn't belong to any allocated extents. -1 = no more
+ * */
+ DiskLoc unused;
int unusedLength;
DiskLoc freeListStart;
DiskLoc freeListEnd;
diff --git a/src/mongo/db/storage/mmap_v1/diskloc.h b/src/mongo/db/storage/mmap_v1/diskloc.h
index 1e4d8649022..9f49a39e5d1 100644
--- a/src/mongo/db/storage/mmap_v1/diskloc.h
+++ b/src/mongo/db/storage/mmap_v1/diskloc.h
@@ -52,12 +52,15 @@ class BtreeBucket;
(such as adding a virtual function)
*/
class DiskLoc {
- int _a; // this will be volume, file #, etc. but is a logical value could be anything depending on storage engine
+ // this will be volume, file #, etc. but is a logical value; could be anything depending on
+ // storage engine
+ int _a;
int ofs;
public:
enum SentinelValues {
- /* note NullOfs is different. todo clean up. see refs to NullOfs in code - use is valid but outside DiskLoc context so confusing as-is. */
+ /* note NullOfs is different. todo clean up. see refs to NullOfs in code - use is valid but
+ * outside DiskLoc context so confusing as-is. */
NullOfs = -1,
// Caps the number of files that may be allocated in a database, allowing about 32TB of
@@ -96,8 +99,9 @@ public:
}
DiskLoc& Null() {
_a = -1;
- ofs =
- 0; /* note NullOfs is different. todo clean up. see refs to NullOfs in code - use is valid but outside DiskLoc context so confusing as-is. */
+ /* note NullOfs is different. todo clean up. see refs to NullOfs in code - use is valid but
+ * outside DiskLoc context so confusing as-is. */
+ ofs = 0;
return *this;
}
void assertOk() const {
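
Editor's note: the relocated comments describe DiskLoc's two-part layout and its slightly confusing sentinels — a null location is {_a: -1, ofs: 0}, while NullOfs (-1) marks something else entirely. A toy model of just that invariant (ToyDiskLoc is illustrative, not the real class):

#include <cassert>

struct ToyDiskLoc {
    static const int NullOfs = -1;  // a different sentinel, valid outside this context
    int _a;                         // file number; a logical, engine-dependent value
    int ofs;                        // offset within that file

    bool isNull() const { return _a == -1; }
    ToyDiskLoc& Null() { _a = -1; ofs = 0; return *this; }  // note: ofs 0, not NullOfs
};

int main() {
    ToyDiskLoc loc;
    loc.Null();
    // Null and the NullOfs sentinel are distinct, as the comment warns.
    assert(loc.isNull() && loc.ofs != ToyDiskLoc::NullOfs);
}
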
diff --git a/src/mongo/db/storage/mmap_v1/dur.cpp b/src/mongo/db/storage/mmap_v1/dur.cpp
index fc0bcdf84c2..cfc5c72db05 100644
--- a/src/mongo/db/storage/mmap_v1/dur.cpp
+++ b/src/mongo/db/storage/mmap_v1/dur.cpp
@@ -34,19 +34,22 @@
we could be in read lock for this
for very large objects write directly to redo log in situ?
WRITETOJOURNAL
- we could be unlocked (the main db lock that is...) for this, with sufficient care, but there is some complexity
- have to handle falling behind which would use too much ram (going back into a read lock would suffice to stop that).
- for now (1.7.5/1.8.0) we are in read lock which is not ideal.
+ we could be unlocked (the main db lock that is...) for this, with sufficient care, but there
+ is some complexity: we have to handle falling behind, which would use too much ram (going back
+ into a read lock would suffice to stop that). for now (1.7.5/1.8.0) we are in read lock which
+ is not ideal.
WRITETODATAFILES
- actually write to the database data files in this phase. currently done by memcpy'ing the writes back to
- the non-private MMF. alternatively one could write to the files the traditional way; however the way our
- storage engine works that isn't any faster (actually measured a tiny bit slower).
+ actually write to the database data files in this phase. currently done by memcpy'ing the
+ writes back to the non-private MMF. alternatively one could write to the files the
+ traditional way; however the way our storage engine works that isn't any faster (actually
+ measured a tiny bit slower).
REMAPPRIVATEVIEW
- we could in a write lock quickly flip readers back to the main view, then stay in read lock and do our real
- remapping. with many files (e.g., 1000), remapping could be time consuming (several ms), so we don't want
- to be too frequent.
- there could be a slow down immediately after remapping as fresh copy-on-writes for commonly written pages will
- be required. so doing these remaps fractionally is helpful.
+ we could in a write lock quickly flip readers back to the main view, then stay in read lock
+ and do our real remapping. with many files (e.g., 1000), remapping could be time consuming
+ (several ms), so we don't want to be too frequent.
+
+ there could be a slow down immediately after remapping as fresh copy-on-writes for commonly
+ written pages will be required. so doing these remaps fractionally is helpful.
mutexes:
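
Editor's note: the re-flowed comment block is really a mini design doc for the group-commit pipeline. A schematic of the phase ordering it describes, with hypothetical stubs standing in for the real dur.cpp internals:

#include <iostream>

// Stubs only; each line summarizes what the comment says the phase does.
static void writeToJournal()   { std::cout << "WRITETOJOURNAL: fsync the redo log\n"; }
static void writeToDataFiles() { std::cout << "WRITETODATAFILES: memcpy into the shared MMF\n"; }
static void remapPrivateView() { std::cout << "REMAPPRIVATEVIEW: restore copy-on-write views\n"; }

int main() {
    // One group commit, in the order the comment block walks through.
    writeToJournal();    // the group is durable once this returns
    writeToDataFiles();  // apply the same writes to the non-private view
    remapPrivateView();  // done fractionally to spread out fresh COW faults
}
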
diff --git a/src/mongo/db/storage/mmap_v1/dur_journal.cpp b/src/mongo/db/storage/mmap_v1/dur_journal.cpp
index c1e56b9790f..0ab2ff648ca 100644
--- a/src/mongo/db/storage/mmap_v1/dur_journal.cpp
+++ b/src/mongo/db/storage/mmap_v1/dur_journal.cpp
@@ -390,10 +390,10 @@ void _preallocateFiles() {
unsigned long long limit = DataLimitPerJournalFile;
if (debug && i == 1) {
- // moving 32->64, the prealloc files would be short. that is "ok", but we want to exercise that
- // case, so we force exercising here when _DEBUG is set by arbitrarily stopping prealloc at a low
- // limit for a file. also we want to be able to change in the future the constant without a lot of
- // work anyway.
+ // moving 32->64, the prealloc files would be short. that is "ok", but we want to
+ // exercise that case, so we force exercising here when _DEBUG is set by arbitrarily
+ // stopping prealloc at a low limit for a file. also we want to be able to change in
+ // the future the constant without a lot of work anyway.
limit = 16 * 1024 * 1024;
}
preallocateFile(filepath, limit);
diff --git a/src/mongo/db/storage/mmap_v1/dur_journalformat.h b/src/mongo/db/storage/mmap_v1/dur_journalformat.h
index 3c31c2686dd..6392a5600fa 100644
--- a/src/mongo/db/storage/mmap_v1/dur_journalformat.h
+++ b/src/mongo/db/storage/mmap_v1/dur_journalformat.h
@@ -49,11 +49,12 @@ struct JHeader {
JHeader() {}
JHeader(std::string fname);
- char magic
- [2]; // "j\n". j means journal, then a linefeed, fwiw if you were to run "less" on the file or something...
+ // "j\n". j means journal, then a linefeed, fwiw if you were to run "less" on the file or
+ // something...
+ char magic[2];
-// x4142 is asci--readable if you look at the file with head/less -- thus the starting values were near
-// that. simply incrementing the version # is safe on a fwd basis.
+// x4142 is ascii-readable if you look at the file with head/less -- thus the starting values were
+// near that. simply incrementing the version # is safe on a fwd basis.
#if defined(_NOCOMPRESS)
enum { CurrentVersion = 0x4148 };
#else
@@ -62,15 +63,15 @@ struct JHeader {
unsigned short _version;
// these are just for diagnostic ease (make header more useful as plain text)
- char n1; // '\n'
- char ts[20]; // ascii timestamp of file generation. for user reading, not used by code.
- char n2; // '\n'
- char dbpath
- [128]; // path/filename of this file for human reading and diagnostics. not used by code.
- char n3, n4; // '\n', '\n'
+ char n1; // '\n'
+ char ts[20]; // ascii timestamp of file generation. for user reading, not used by code.
+ char n2; // '\n'
+ char dbpath[128]; // path/filename of this file for human reading and diagnostics. not used by
+ // code.
+ char n3, n4; // '\n', '\n'
- unsigned long long
- fileId; // unique identifier that will be in each JSectHeader. important as we recycle prealloced files
+ // unique identifier that will be in each JSectHeader. important as we recycle prealloced files
+ unsigned long long fileId;
char reserved3[8026]; // 8KB total for the file header
char txt2[2]; // "\n\n" at the end
@@ -112,7 +113,8 @@ public:
};
/** an individual write operation within a group commit section. Either the entire section should
- be applied, or nothing. (We check the md5 for the whole section before doing anything on recovery.)
+ be applied, or nothing. (We check the md5 for the whole section before doing anything on
+ recovery.)
*/
struct JEntry {
enum OpCodes {
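
Editor's note: the header fields above are sized so the whole JHeader occupies exactly 8KB. A quick self-contained check of that arithmetic, mirroring the field sizes from the diff in a toy struct (assumes an 8-byte unsigned long long; this is not the real JHeader):

#pragma pack(1)
struct ToyJHeader {            // field sizes copied from the diff above
    char magic[2];             // "j\n"
    unsigned short _version;   // kept ascii-readable, per the comment
    char n1;                   // '\n'
    char ts[20];               // ascii generation timestamp (diagnostics only)
    char n2;                   // '\n'
    char dbpath[128];          // originating path (diagnostics only)
    char n3, n4;               // '\n', '\n'
    unsigned long long fileId; // repeated in each JSectHeader; files are recycled
    char reserved3[8026];
    char txt2[2];              // "\n\n"
};
#pragma pack()

static_assert(sizeof(ToyJHeader) == 8192, "journal file header should be exactly 8KB");

int main() {}
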
diff --git a/src/mongo/db/storage/mmap_v1/dur_journalimpl.h b/src/mongo/db/storage/mmap_v1/dur_journalimpl.h
index 06441653fe3..e51608b69e4 100644
--- a/src/mongo/db/storage/mmap_v1/dur_journalimpl.h
+++ b/src/mongo/db/storage/mmap_v1/dur_journalimpl.h
@@ -108,8 +108,8 @@ private:
static void preFlush();
static void postFlush();
unsigned long long _preFlushTime;
- unsigned long long
- _lastFlushTime; // data < this time is fsynced in the datafiles (unless hard drive controller is caching)
+ // data < this time is fsynced in the datafiles (unless hard drive controller is caching)
+ unsigned long long _lastFlushTime;
bool _writeToLSNNeeded;
void updateLSNFile();
};
diff --git a/src/mongo/db/storage/mmap_v1/dur_preplogbuffer.cpp b/src/mongo/db/storage/mmap_v1/dur_preplogbuffer.cpp
index 3caf910ab37..ea3b4e85148 100644
--- a/src/mongo/db/storage/mmap_v1/dur_preplogbuffer.cpp
+++ b/src/mongo/db/storage/mmap_v1/dur_preplogbuffer.cpp
@@ -69,7 +69,9 @@ static DurableMappedFile* findMMF_inlock(void* ptr, size_t& ofs) {
DurableMappedFile* f = privateViews.find_inlock(ptr, ofs);
if (f == 0) {
error() << "findMMF_inlock failed " << privateViews.numberOfViews_inlock() << endl;
- printStackTrace(); // we want a stack trace and the assert below didn't print a trace once in the real world - not sure why
+ // we want a stack trace and the assert below didn't print a trace once in the real world -
+ // not sure why
+ printStackTrace();
stringstream ss;
ss << "view pointer cannot be resolved " << std::hex << (size_t)ptr;
journalingFailure(ss.str().c_str()); // asserts, which then abends
diff --git a/src/mongo/db/storage/mmap_v1/dur_recover.cpp b/src/mongo/db/storage/mmap_v1/dur_recover.cpp
index 49daa8c6550..c37fbd23ef7 100644
--- a/src/mongo/db/storage/mmap_v1/dur_recover.cpp
+++ b/src/mongo/db/storage/mmap_v1/dur_recover.cpp
@@ -207,7 +207,8 @@ public:
_entries->skip(len + 1); // skip '\0' too
_entries->read(lenOrOpCode); // read this for the fall through
}
- // fall through as a basic operation always follows jdbcontext, and we don't have anything to return yet
+ // fall through as a basic operation always follows jdbcontext, and we don't have
+ // anything to return yet
default:
// fall through
@@ -520,7 +521,8 @@ bool RecoveryJob::processFile(boost::filesystem::path journalfile) {
return true;
}
} catch (...) {
- // if something weird like a permissions problem keep going so the massert down below can happen (presumably)
+ // if something weird like a permissions problem keep going so the massert down below can
+ // happen (presumably)
log() << "recover exception checking filesize" << endl;
}
diff --git a/src/mongo/db/storage/mmap_v1/dur_stats.h b/src/mongo/db/storage/mmap_v1/dur_stats.h
index 8ec6f8c024f..35cee01d0c6 100644
--- a/src/mongo/db/storage/mmap_v1/dur_stats.h
+++ b/src/mongo/db/storage/mmap_v1/dur_stats.h
@@ -33,8 +33,9 @@
namespace mongo {
namespace dur {
-/** journaling stats. the model here is that the commit thread is the only writer, and that reads are
- uncommon (from a serverStatus command and such). Thus, there should not be multicore chatter overhead.
+/** journaling stats. the model here is that the commit thread is the only writer, and that reads
+ * are uncommon (from a serverStatus command and such). Thus, there should not be multicore chatter
+ * overhead.
*/
struct Stats {
struct S {
diff --git a/src/mongo/db/storage/mmap_v1/durable_mapped_file.cpp b/src/mongo/db/storage/mmap_v1/durable_mapped_file.cpp
index d8757c90622..92cc7e84ef9 100644
--- a/src/mongo/db/storage/mmap_v1/durable_mapped_file.cpp
+++ b/src/mongo/db/storage/mmap_v1/durable_mapped_file.cpp
@@ -28,8 +28,10 @@
* it in the license file.
*/
-/* this module adds some of our layers atop memory mapped files - specifically our handling of private views & such
- if you don't care about journaling/durability (temp sort files & such) use MemoryMappedFile class, not this.
+/* this module adds some of our layers atop memory mapped files - specifically our handling of
+ * private views & such
+ if you don't care about journaling/durability (temp sort files & such) use MemoryMappedFile
+ class, not this.
*/
#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kStorage
@@ -275,9 +277,8 @@ bool DurableMappedFile::finishOpening() {
"(look in log for "
"more information)");
}
- privateViews.add_inlock(
- _view_private,
- this); // note that testIntent builds use this, even though it points to view_write then...
+ // note that testIntent builds use this, even though it points to view_write then...
+ privateViews.add_inlock(_view_private, this);
} else {
_view_private = _view_write;
}
diff --git a/src/mongo/db/storage/mmap_v1/durable_mapped_file.h b/src/mongo/db/storage/mmap_v1/durable_mapped_file.h
index a34fdef6fb2..230dbc31349 100644
--- a/src/mongo/db/storage/mmap_v1/durable_mapped_file.h
+++ b/src/mongo/db/storage/mmap_v1/durable_mapped_file.h
@@ -36,9 +36,9 @@
namespace mongo {
-/** DurableMappedFile adds some layers atop memory mapped files - specifically our handling of private views & such.
- if you don't care about journaling/durability (temp sort files & such) use MemoryMappedFile class,
- not this.
+/** DurableMappedFile adds some layers atop memory mapped files - specifically our handling of
+ * private views & such. if you don't care about journaling/durability (temp sort files & such) use
+ * MemoryMappedFile class, not this.
*/
class DurableMappedFile : private MemoryMappedFile {
protected:
@@ -275,6 +275,7 @@ inline void PointerToDurableMappedFile::makeWritable(void* privateView, unsigned
inline void PointerToDurableMappedFile::makeWritable(void* _p, unsigned len) {}
#endif
-// allows a pointer into any private view of a DurableMappedFile to be resolved to the DurableMappedFile object
+// allows a pointer into any private view of a DurableMappedFile to be resolved to the
+// DurableMappedFile object
extern PointerToDurableMappedFile privateViews;
}
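
Editor's note: resolving "a pointer into any private view" to its owning DurableMappedFile is, at heart, an interval lookup keyed by base address. A self-contained sketch of that idea using an ordered map and upper_bound; Region and findOwner are illustrative, and the real PointerToDurableMappedFile also exposes _inlock variants as seen above:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <map>

struct Region { std::uintptr_t base; std::size_t len; int id; };

int findOwner(const std::map<std::uintptr_t, Region>& views, const void* p) {
    std::uintptr_t addr = reinterpret_cast<std::uintptr_t>(p);
    auto it = views.upper_bound(addr);           // first region starting after p
    if (it == views.begin()) return -1;          // nothing starts at or below p
    --it;                                        // candidate region at/below p
    const Region& r = it->second;
    return (addr < r.base + r.len) ? r.id : -1;  // inside? resolved : unmapped
}

int main() {
    std::map<std::uintptr_t, Region> views;
    views[0x1000] = Region{0x1000, 0x1000, 1};
    views[0x8000] = Region{0x8000, 0x2000, 2};
    assert(findOwner(views, reinterpret_cast<void*>(0x8100)) == 2);
    assert(findOwner(views, reinterpret_cast<void*>(0x3000)) == -1);
}
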
diff --git a/src/mongo/db/storage/mmap_v1/durop.cpp b/src/mongo/db/storage/mmap_v1/durop.cpp
index d7d36bc36e3..754e486f1c2 100644
--- a/src/mongo/db/storage/mmap_v1/durop.cpp
+++ b/src/mongo/db/storage/mmap_v1/durop.cpp
@@ -136,9 +136,9 @@ bool FileCreatedOp::needFilesClosed() {
}
void FileCreatedOp::replay() {
- // i believe the code assumes new files are filled with zeros. thus we have to recreate the file,
- // or rewrite at least, even if it were the right length. perhaps one day we should change that
- // although easier to avoid defects if we assume it is zeros perhaps.
+ // i believe the code assumes new files are filled with zeros. thus we have to recreate the
+ // file, or rewrite at least, even if it were the right length. perhaps one day we should
+ // change that although easier to avoid defects if we assume it is zeros perhaps.
string full = _p.asFullPath();
if (boost::filesystem::exists(full)) {
try {
diff --git a/src/mongo/db/storage/mmap_v1/durop.h b/src/mongo/db/storage/mmap_v1/durop.h
index 337f8177970..b6d80538524 100644
--- a/src/mongo/db/storage/mmap_v1/durop.h
+++ b/src/mongo/db/storage/mmap_v1/durop.h
@@ -44,9 +44,9 @@ namespace dur {
/** DurOp - Operations we journal that aren't just basic writes.
*
- * Basic writes are logged as JEntry's, and indicated in ram temporarily as struct dur::WriteIntent.
- * We don't make WriteIntent inherit from DurOp to keep it as lean as possible as there will be millions of
- * them (we don't want a vtable for example there).
+ * Basic writes are logged as JEntry's, and indicated in ram temporarily as struct
+ * dur::WriteIntent. We don't make WriteIntent inherit from DurOp to keep it as lean as possible as
+ * there will be millions of them (we don't want a vtable for example there).
*
* For each op we want to journal, we define a subclass.
*/
diff --git a/src/mongo/db/storage/mmap_v1/extent.h b/src/mongo/db/storage/mmap_v1/extent.h
index 9d6d3935346..16af89fb42b 100644
--- a/src/mongo/db/storage/mmap_v1/extent.h
+++ b/src/mongo/db/storage/mmap_v1/extent.h
@@ -42,7 +42,8 @@ namespace mongo {
/* extents are datafile regions where all the records within the region
belong to the same namespace.
-(11:12:35 AM) dm10gen: when the extent is allocated, all its empty space is stuck into one big DeletedRecord
+(11:12:35 AM) dm10gen: when the extent is allocated, all its empty space is stuck into one big
+ DeletedRecord
(11:12:55 AM) dm10gen: and that is placed on the free list
*/
#pragma pack(1)
diff --git a/src/mongo/db/storage/mmap_v1/journal_latency_test_cmd.cpp b/src/mongo/db/storage/mmap_v1/journal_latency_test_cmd.cpp
index c34cf60df5f..2b1cb3dfb15 100644
--- a/src/mongo/db/storage/mmap_v1/journal_latency_test_cmd.cpp
+++ b/src/mongo/db/storage/mmap_v1/journal_latency_test_cmd.cpp
@@ -122,7 +122,8 @@ public:
sleepmillis(4);
}
long long y = t2.micros() - 4 * N * 1000;
- // not really trusting the timer granularity on all platforms so whichever is higher of x and y
+ // not really trusting the timer granularity on all platforms so whichever is higher
+ // of x and y
bb[pass].append("8KBWithPauses", max(x, y) / (N * 1000.0));
}
{
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp b/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp
index aaa28e1f1fc..41cf7b70a91 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp
@@ -443,8 +443,8 @@ DiskLoc MmapV1ExtentManager::_allocFromFreeList(OperationContext* txn,
break;
}
if (t.seconds() >= 2) {
- // have spent lots of time in write lock, and we are in [low,high], so close enough
- // could come into play if extent freelist is very long
+ // have spent lots of time in write lock, and we are in [low,high], so close
+ // enough could come into play if extent freelist is very long
break;
}
} else {
diff --git a/src/mongo/db/storage/mmap_v1/record.h b/src/mongo/db/storage/mmap_v1/record.h
index 131b1144ff6..de3ffaef2cb 100644
--- a/src/mongo/db/storage/mmap_v1/record.h
+++ b/src/mongo/db/storage/mmap_v1/record.h
@@ -42,13 +42,16 @@ class DeletedRecord;
/* Record is a record in a datafile. DeletedRecord is similar but for deleted space.
*11:03:20 AM) dm10gen: regarding extentOfs...
-(11:03:42 AM) dm10gen: an extent is a continugous disk area, which contains many Records and DeleteRecords
+(11:03:42 AM) dm10gen: an extent is a contiguous disk area, which contains many Records and
+ DeleteRecords
(11:03:56 AM) dm10gen: a DiskLoc has two pieces, the fileno and ofs. (64 bit total)
-(11:04:16 AM) dm10gen: to keep the headesr small, instead of storing a 64 bit ptr to the full extent address, we keep just the offset
+(11:04:16 AM) dm10gen: to keep the headers small, instead of storing a 64 bit ptr to the full
+ extent address, we keep just the offset
(11:04:29 AM) dm10gen: we can do this as we know the record's address, and it has the same fileNo
(11:04:33 AM) dm10gen: see class DiskLoc for more info
(11:04:43 AM) dm10gen: so that is how Record::myExtent() works
-(11:04:53 AM) dm10gen: on an alloc(), when we build a new Record, we must populate its extentOfs then
+(11:04:53 AM) dm10gen: on an alloc(), when we build a new Record, we must populate its extentOfs
+ then
*/
#pragma pack(1)
class Record {
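
Editor's note: the IRC excerpt explains why Record stores only a 32-bit extentOfs — the record and its extent always share the same file number, so the full extent DiskLoc can be rebuilt from the record's own location. A toy version of that reconstruction (ToyLoc and ToyRecord are illustrative, not the real classes):

#include <cassert>

struct ToyLoc { int a; int ofs; };  // fileNo + offset, like DiskLoc

struct ToyRecord {
    int extentOfs;  // offset of the owning extent; the fileNo is implied
    ToyLoc myExtent(const ToyLoc& myLoc) const {
        return ToyLoc{myLoc.a, extentOfs};  // same file, stored offset
    }
};

int main() {
    ToyRecord r{4096};                          // extent starts at offset 4096
    ToyLoc ext = r.myExtent(ToyLoc{3, 9000});   // record lives at file 3, ofs 9000
    assert(ext.a == 3 && ext.ofs == 4096);      // extent loc fully reconstructed
}
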
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp
index 9b1817707c9..a0b9e770ead 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp
@@ -791,7 +791,8 @@ Status RecordStoreV1Base::validate(OperationContext* txn,
if (loc.questionable()) {
if (isCapped() && !loc.isValid() && i == 1) {
- /* the constructor for NamespaceDetails intentionally sets deletedList[1] to invalid
+ /* the constructor for NamespaceDetails intentionally sets
+ * deletedList[1] to invalid
see comments in namespace.h
*/
break;
diff --git a/src/mongo/dbtests/chunktests.cpp b/src/mongo/dbtests/chunktests.cpp
index 22242cd6ce2..9281f64c5ac 100644
--- a/src/mongo/dbtests/chunktests.cpp
+++ b/src/mongo/dbtests/chunktests.cpp
@@ -318,8 +318,8 @@ class InMultiShard : public CompoundKeyBase {
return BSON("a" << BSON("$in" << BSON_ARRAY(0 << 5 << 10)) << "b"
<< BSON("$in" << BSON_ARRAY(0 << 5 << 25)));
}
- // If we were to send this query to just the shards it actually needed to hit, it would only hit shards 0 and 2
- // Because of the optimization from SERVER-4745, however, we'll also hit shard 1.
+ // If we were to send this query to just the shards it actually needed to hit, it would only hit
+ // shards 0 and 2. Because of the optimization from SERVER-4745, however, we'll also hit shard 1.
virtual BSONArray expectedShardNames() const {
return BSON_ARRAY("0"
<< "1"
diff --git a/src/mongo/dbtests/jsobjtests.cpp b/src/mongo/dbtests/jsobjtests.cpp
index e1f1a057d77..b9bc00cde19 100644
--- a/src/mongo/dbtests/jsobjtests.cpp
+++ b/src/mongo/dbtests/jsobjtests.cpp
@@ -189,9 +189,8 @@ void keyTest(const BSONObj& o, bool mustBeCompact = false) {
cout << r3 << endl;
}
ASSERT(ok);
- if (k.isCompactFormat() &&
- kLast
- ->isCompactFormat()) { // only check if not bson as bson woEqual is broken! (or was may2011)
+ // only check if not bson as bson woEqual is broken! (or was may2011)
+ if (k.isCompactFormat() && kLast->isCompactFormat()) {
if (k.woEqual(*kLast) != (r2 == 0)) { // check woEqual matches
cout << r2 << endl;
cout << k.toString() << endl;
@@ -901,7 +900,8 @@ public:
ASSERT_EQUALS("123.4567891234568", x["d"].toString(false, true));
ASSERT_EQUALS("123456789.1234568", x["e"].toString(false, true));
- // ASSERT_EQUALS( "1.234567891234568e+21" , x["f"].toString( false , true ) ); // windows and *nix are different - TODO, work around for test or not bother?
+ // ASSERT_EQUALS( "1.234567891234568e+21" , x["f"].toString( false , true ) ); // windows
+ // and *nix are different - TODO, work around for test or not bother?
ASSERT_EQUALS("-123.456", x["g"].toString(false, true));
diff --git a/src/mongo/dbtests/jstests.cpp b/src/mongo/dbtests/jstests.cpp
index 2842daffea3..1a4f0864b43 100644
--- a/src/mongo/dbtests/jstests.cpp
+++ b/src/mongo/dbtests/jstests.cpp
@@ -639,7 +639,9 @@ public:
ASSERT_EQUALS(5, out["z"].number());
ASSERT_EQUALS(NumberDouble, out["a"].embeddedObjectUserCheck()["0"].type());
// Commenting so that v8 tests will work
- // ASSERT_EQUALS( NumberDouble , out["a"].embeddedObjectUserCheck()["1"].type() ); // TODO: this is technically bad, but here to make sure that i understand the behavior
+ // ASSERT_EQUALS( NumberDouble , out["a"].embeddedObjectUserCheck()["1"].type()
+ // ); // TODO: this is technically bad, but here to make sure that i understand
+ // the behavior
// Eliot says I don't have to worry about this case
diff --git a/src/mongo/dbtests/namespacetests.cpp b/src/mongo/dbtests/namespacetests.cpp
index c58db812480..d19c3ffdf20 100644
--- a/src/mongo/dbtests/namespacetests.cpp
+++ b/src/mongo/dbtests/namespacetests.cpp
@@ -141,327 +141,327 @@ public:
namespace NamespaceDetailsTests {
#if 0 // SERVER-13640
- class Base {
- const char *ns_;
- Lock::GlobalWrite lk;
- Client::Context _context;
- public:
- Base( const char *ns = "unittests.NamespaceDetailsTests" ) : ns_( ns ) , _context( ns ) {}
- virtual ~Base() {
- OperationContextImpl txn;
- if ( !nsd() )
- return;
- _context.db()->dropCollection( &txn, ns() );
- }
- protected:
- void create() {
- Lock::GlobalWrite lk;
- OperationContextImpl txn;
- ASSERT( userCreateNS( &txn, db(), ns(), fromjson( spec() ), false ).isOK() );
- }
- virtual string spec() const = 0;
- int nRecords() const {
- int count = 0;
- const Extent* ext;
- for ( RecordId extLoc = nsd()->firstExtent();
- !extLoc.isNull();
- extLoc = ext->xnext) {
- ext = extentManager()->getExtent(extLoc);
- int fileNo = ext->firstRecord.a();
- if ( fileNo == -1 )
- continue;
- for ( int recOfs = ext->firstRecord.getOfs(); recOfs != RecordId::NullOfs;
- recOfs = recordStore()->recordFor(RecordId(fileNo, recOfs))->nextOfs() ) {
- ++count;
- }
- }
- ASSERT_EQUALS( count, nsd()->numRecords() );
- return count;
- }
- int nExtents() const {
- int count = 0;
- for ( RecordId extLoc = nsd()->firstExtent();
- !extLoc.isNull();
- extLoc = extentManager()->getExtent(extLoc)->xnext ) {
- ++count;
- }
- return count;
- }
- const char *ns() const {
- return ns_;
- }
- const NamespaceDetails *nsd() const {
- Collection* c = collection();
- if ( !c )
- return NULL;
- return c->detailsDeprecated();
- }
- const RecordStore* recordStore() const {
- Collection* c = collection();
- if ( !c )
- return NULL;
- return c->getRecordStore();
- }
- Database* db() const {
- return _context.db();
- }
- const ExtentManager* extentManager() const {
- return db()->getExtentManager();
- }
- Collection* collection() const {
- return db()->getCollection( ns() );
+ class Base {
+ const char *ns_;
+ Lock::GlobalWrite lk;
+ Client::Context _context;
+ public:
+ Base( const char *ns = "unittests.NamespaceDetailsTests" ) : ns_( ns ) , _context( ns ) {}
+ virtual ~Base() {
+ OperationContextImpl txn;
+ if ( !nsd() )
+ return;
+ _context.db()->dropCollection( &txn, ns() );
+ }
+ protected:
+ void create() {
+ Lock::GlobalWrite lk;
+ OperationContextImpl txn;
+ ASSERT( userCreateNS( &txn, db(), ns(), fromjson( spec() ), false ).isOK() );
+ }
+ virtual string spec() const = 0;
+ int nRecords() const {
+ int count = 0;
+ const Extent* ext;
+ for ( RecordId extLoc = nsd()->firstExtent();
+ !extLoc.isNull();
+ extLoc = ext->xnext) {
+ ext = extentManager()->getExtent(extLoc);
+ int fileNo = ext->firstRecord.a();
+ if ( fileNo == -1 )
+ continue;
+ for ( int recOfs = ext->firstRecord.getOfs(); recOfs != RecordId::NullOfs;
+ recOfs = recordStore()->recordFor(RecordId(fileNo, recOfs))->nextOfs() ) {
+ ++count;
}
+ }
+ ASSERT_EQUALS( count, nsd()->numRecords() );
+ return count;
+ }
+ int nExtents() const {
+ int count = 0;
+ for ( RecordId extLoc = nsd()->firstExtent();
+ !extLoc.isNull();
+ extLoc = extentManager()->getExtent(extLoc)->xnext ) {
+ ++count;
+ }
+ return count;
+ }
+ const char *ns() const {
+ return ns_;
+ }
+ const NamespaceDetails *nsd() const {
+ Collection* c = collection();
+ if ( !c )
+ return NULL;
+ return c->detailsDeprecated();
+ }
+ const RecordStore* recordStore() const {
+ Collection* c = collection();
+ if ( !c )
+ return NULL;
+ return c->getRecordStore();
+ }
+ Database* db() const {
+ return _context.db();
+ }
+ const ExtentManager* extentManager() const {
+ return db()->getExtentManager();
+ }
+ Collection* collection() const {
+ return db()->getCollection( ns() );
+ }
- static BSONObj bigObj() {
- BSONObjBuilder b;
- b.appendOID("_id", 0, true);
- string as( 187, 'a' );
- b.append( "a", as );
- return b.obj();
- }
+ static BSONObj bigObj() {
+ BSONObjBuilder b;
+ b.appendOID("_id", 0, true);
+ string as( 187, 'a' );
+ b.append( "a", as );
+ return b.obj();
+ }
- };
-
- class Create : public Base {
- public:
- void run() {
- create();
- ASSERT( nsd() );
- ASSERT_EQUALS( 0, nRecords() );
- ASSERT( nsd()->firstExtent() == nsd()->capExtent() );
- RecordId initial = RecordId();
- initial.setInvalid();
- ASSERT( initial == nsd()->capFirstNewRecord() );
- }
- virtual string spec() const { return "{\"capped\":true,\"size\":512,\"$nExtents\":1}"; }
- };
-
- class SingleAlloc : public Base {
- public:
- void run() {
- OperationContextImpl txn;
- create();
- BSONObj b = bigObj();
- ASSERT( collection()->insertDocument( &txn, b, true ).isOK() );
- ASSERT_EQUALS( 1, nRecords() );
- }
- virtual string spec() const { return "{\"capped\":true,\"size\":512,\"$nExtents\":1}"; }
- };
-
- class Realloc : public Base {
- public:
- void run() {
- OperationContextImpl txn;
- create();
-
- const int N = 20;
- const int Q = 16; // these constants depend on the size of the bson object, the extent size allocated by the system too
- RecordId l[ N ];
- for ( int i = 0; i < N; ++i ) {
- BSONObj b = bigObj();
- StatusWith<RecordId> status = collection()->insertDocument( &txn, b, true );
- ASSERT( status.isOK() );
- l[ i ] = status.getValue();
- ASSERT( !l[ i ].isNull() );
- ASSERT( nRecords() <= Q );
- //ASSERT_EQUALS( 1 + i % 2, nRecords() );
- if ( i >= 16 )
- ASSERT( l[ i ] == l[ i - Q] );
- }
- }
- virtual string spec() const { return "{\"capped\":true,\"size\":512,\"$nExtents\":1}"; }
- };
-
- class TwoExtent : public Base {
- public:
- void run() {
- OperationContextImpl txn;
- create();
- ASSERT_EQUALS( 2, nExtents() );
-
- RecordId l[ 8 ];
- for ( int i = 0; i < 8; ++i ) {
- StatusWith<RecordId> status = collection()->insertDocument( &txn, bigObj(), true );
- ASSERT( status.isOK() );
- l[ i ] = status.getValue();
- ASSERT( !l[ i ].isNull() );
- //ASSERT_EQUALS( i < 2 ? i + 1 : 3 + i % 2, nRecords() );
- //if ( i > 3 )
- // ASSERT( l[ i ] == l[ i - 4 ] );
- }
- ASSERT( nRecords() == 8 );
-
- // Too big
- BSONObjBuilder bob;
- bob.appendOID( "_id", NULL, true );
- bob.append( "a", string( MinExtentSize + 500, 'a' ) ); // min extent size is now 4096
- BSONObj bigger = bob.done();
- StatusWith<RecordId> status = collection()->insertDocument( &txn, bigger, false );
- ASSERT( !status.isOK() );
- ASSERT_EQUALS( 0, nRecords() );
- }
- private:
- virtual string spec() const {
- return "{\"capped\":true,\"size\":512,\"$nExtents\":2}";
- }
- };
+ };
+
+ class Create : public Base {
+ public:
+ void run() {
+ create();
+ ASSERT( nsd() );
+ ASSERT_EQUALS( 0, nRecords() );
+ ASSERT( nsd()->firstExtent() == nsd()->capExtent() );
+ RecordId initial = RecordId();
+ initial.setInvalid();
+ ASSERT( initial == nsd()->capFirstNewRecord() );
+ }
+ virtual string spec() const { return "{\"capped\":true,\"size\":512,\"$nExtents\":1}"; }
+ };
+ class SingleAlloc : public Base {
+ public:
+ void run() {
+ OperationContextImpl txn;
+ create();
+ BSONObj b = bigObj();
+ ASSERT( collection()->insertDocument( &txn, b, true ).isOK() );
+ ASSERT_EQUALS( 1, nRecords() );
+ }
+ virtual string spec() const { return "{\"capped\":true,\"size\":512,\"$nExtents\":1}"; }
+ };
- BSONObj docForRecordSize( int size ) {
- BSONObjBuilder b;
- b.append( "_id", 5 );
- b.append( "x", string( size - Record::HeaderSize - 22, 'x' ) );
- BSONObj x = b.obj();
- ASSERT_EQUALS( Record::HeaderSize + x.objsize(), size );
- return x;
+ class Realloc : public Base {
+ public:
+ void run() {
+ OperationContextImpl txn;
+ create();
+
+ const int N = 20;
+ const int Q = 16; // these constants depend on the size of the bson object, the extent size
+ allocated by the system too RecordId l[ N ];
+ for ( int i = 0; i < N; ++i ) {
+ BSONObj b = bigObj();
+ StatusWith<RecordId> status = collection()->insertDocument( &txn, b, true );
+ ASSERT( status.isOK() );
+ l[ i ] = status.getValue();
+ ASSERT( !l[ i ].isNull() );
+ ASSERT( nRecords() <= Q );
+ //ASSERT_EQUALS( 1 + i % 2, nRecords() );
+ if ( i >= 16 )
+ ASSERT( l[ i ] == l[ i - Q] );
}
+ }
+ virtual string spec() const { return "{\"capped\":true,\"size\":512,\"$nExtents\":1}"; }
+ };
- /**
- * alloc() does not quantize records in capped collections.
- * NB: this actually tests that the code in Database::createCollection doesn't set
- * PowerOf2Sizes for capped collections.
- */
- class AllocCappedNotQuantized : public Base {
- public:
- void run() {
- OperationContextImpl txn;
- create();
- ASSERT( nsd()->isCapped() );
- ASSERT( !nsd()->isUserFlagSet( NamespaceDetails::Flag_UsePowerOf2Sizes ) );
-
- StatusWith<RecordId> result =
- collection()->insertDocument( &txn, docForRecordSize( 300 ), false );
- ASSERT( result.isOK() );
- Record* record = collection()->getRecordStore()->recordFor( result.getValue() );
- // Check that no quantization is performed.
- ASSERT_EQUALS( 300, record->lengthWithHeaders() );
- }
- virtual string spec() const { return "{capped:true,size:2048}"; }
- };
+ class TwoExtent : public Base {
+ public:
+ void run() {
+ OperationContextImpl txn;
+ create();
+ ASSERT_EQUALS( 2, nExtents() );
+
+ RecordId l[ 8 ];
+ for ( int i = 0; i < 8; ++i ) {
+ StatusWith<RecordId> status = collection()->insertDocument( &txn, bigObj(), true );
+ ASSERT( status.isOK() );
+ l[ i ] = status.getValue();
+ ASSERT( !l[ i ].isNull() );
+ //ASSERT_EQUALS( i < 2 ? i + 1 : 3 + i % 2, nRecords() );
+ //if ( i > 3 )
+ // ASSERT( l[ i ] == l[ i - 4 ] );
+ }
+ ASSERT( nRecords() == 8 );
+
+ // Too big
+ BSONObjBuilder bob;
+ bob.appendOID( "_id", NULL, true );
+ bob.append( "a", string( MinExtentSize + 500, 'a' ) ); // min extent size is now 4096
+ BSONObj bigger = bob.done();
+ StatusWith<RecordId> status = collection()->insertDocument( &txn, bigger, false );
+ ASSERT( !status.isOK() );
+ ASSERT_EQUALS( 0, nRecords() );
+ }
+ private:
+ virtual string spec() const {
+ return "{\"capped\":true,\"size\":512,\"$nExtents\":2}";
+ }
+ };
- /* test NamespaceDetails::cappedTruncateAfter(const char *ns, RecordId loc)
- */
- class TruncateCapped : public Base {
- virtual string spec() const {
- return "{\"capped\":true,\"size\":512,\"$nExtents\":2}";
- }
- void pass(int p) {
- OperationContextImpl txn;
- create();
- ASSERT_EQUALS( 2, nExtents() );
-
- BSONObj b = bigObj();
-
- int N = MinExtentSize / b.objsize() * nExtents() + 5;
- int T = N - 4;
-
- RecordId truncAt;
- //RecordId l[ 8 ];
- for ( int i = 0; i < N; ++i ) {
- BSONObj bb = bigObj();
- StatusWith<RecordId> status = collection()->insertDocument( &txn, bb, true );
- ASSERT( status.isOK() );
- RecordId a = status.getValue();
- if( T == i )
- truncAt = a;
- ASSERT( !a.isNull() );
- /*ASSERT_EQUALS( i < 2 ? i + 1 : 3 + i % 2, nRecords() );
- if ( i > 3 )
- ASSERT( l[ i ] == l[ i - 4 ] );*/
- }
- ASSERT( nRecords() < N );
-
- RecordId last, first;
- {
- auto_ptr<Runner> runner(InternalPlanner::collectionScan(&txn,
- ns(),
- collection(),
- InternalPlanner::BACKWARD));
- runner->getNext(NULL, &last);
- ASSERT( !last.isNull() );
- }
- {
- auto_ptr<Runner> runner(InternalPlanner::collectionScan(&txn,
- ns(),
- collection(),
- InternalPlanner::FORWARD));
- runner->getNext(NULL, &first);
- ASSERT( !first.isNull() );
- ASSERT( first != last ) ;
- }
-
- collection()->temp_cappedTruncateAfter(&txn, truncAt, false);
- ASSERT_EQUALS( collection()->numRecords() , 28u );
-
- {
- RecordId loc;
- auto_ptr<Runner> runner(InternalPlanner::collectionScan(&txn,
- ns(),
- collection(),
- InternalPlanner::FORWARD));
- runner->getNext(NULL, &loc);
- ASSERT( first == loc);
- }
- {
- auto_ptr<Runner> runner(InternalPlanner::collectionScan(&txn,
- ns(),
- collection(),
- InternalPlanner::BACKWARD));
- RecordId loc;
- runner->getNext(NULL, &loc);
- ASSERT( last != loc );
- ASSERT( !last.isNull() );
- }
-
- // Too big
- BSONObjBuilder bob;
- bob.appendOID("_id", 0, true);
- bob.append( "a", string( MinExtentSize + 300, 'a' ) );
- BSONObj bigger = bob.done();
- StatusWith<RecordId> status = collection()->insertDocument( &txn, bigger, true );
- ASSERT( !status.isOK() );
- ASSERT_EQUALS( 0, nRecords() );
- }
- public:
- void run() {
-// log() << "******** NOT RUNNING TruncateCapped test yet ************" << endl;
- pass(0);
- }
- };
+ BSONObj docForRecordSize( int size ) {
+ BSONObjBuilder b;
+ b.append( "_id", 5 );
+ b.append( "x", string( size - Record::HeaderSize - 22, 'x' ) );
+ BSONObj x = b.obj();
+ ASSERT_EQUALS( Record::HeaderSize + x.objsize(), size );
+ return x;
+ }
+
+ /**
+ * alloc() does not quantize records in capped collections.
+ * NB: this actually tests that the code in Database::createCollection doesn't set
+ * PowerOf2Sizes for capped collections.
+ */
+ class AllocCappedNotQuantized : public Base {
+ public:
+ void run() {
+ OperationContextImpl txn;
+ create();
+ ASSERT( nsd()->isCapped() );
+ ASSERT( !nsd()->isUserFlagSet( NamespaceDetails::Flag_UsePowerOf2Sizes ) );
+
+ StatusWith<RecordId> result =
+ collection()->insertDocument( &txn, docForRecordSize( 300 ), false );
+ ASSERT( result.isOK() );
+ Record* record = collection()->getRecordStore()->recordFor( result.getValue() );
+ // Check that no quantization is performed.
+ ASSERT_EQUALS( 300, record->lengthWithHeaders() );
+ }
+ virtual string spec() const { return "{capped:true,size:2048}"; }
+ };
+
+
+ /* test NamespaceDetails::cappedTruncateAfter(const char *ns, RecordId loc)
+ */
+ class TruncateCapped : public Base {
+ virtual string spec() const {
+ return "{\"capped\":true,\"size\":512,\"$nExtents\":2}";
+ }
+ void pass(int p) {
+ OperationContextImpl txn;
+ create();
+ ASSERT_EQUALS( 2, nExtents() );
+
+ BSONObj b = bigObj();
+
+ int N = MinExtentSize / b.objsize() * nExtents() + 5;
+ int T = N - 4;
+
+ RecordId truncAt;
+ //RecordId l[ 8 ];
+ for ( int i = 0; i < N; ++i ) {
+ BSONObj bb = bigObj();
+ StatusWith<RecordId> status = collection()->insertDocument( &txn, bb, true );
+ ASSERT( status.isOK() );
+ RecordId a = status.getValue();
+ if( T == i )
+ truncAt = a;
+ ASSERT( !a.isNull() );
+ /*ASSERT_EQUALS( i < 2 ? i + 1 : 3 + i % 2, nRecords() );
+ if ( i > 3 )
+ ASSERT( l[ i ] == l[ i - 4 ] );*/
+ }
+ ASSERT( nRecords() < N );
+
+ RecordId last, first;
+ {
+ auto_ptr<Runner> runner(InternalPlanner::collectionScan(&txn,
+ ns(),
+ collection(),
+ InternalPlanner::BACKWARD));
+ runner->getNext(NULL, &last);
+ ASSERT( !last.isNull() );
+ }
+ {
+ auto_ptr<Runner> runner(InternalPlanner::collectionScan(&txn,
+ ns(),
+ collection(),
+ InternalPlanner::FORWARD));
+ runner->getNext(NULL, &first);
+ ASSERT( !first.isNull() );
+ ASSERT( first != last ) ;
+ }
+
+ collection()->temp_cappedTruncateAfter(&txn, truncAt, false);
+ ASSERT_EQUALS( collection()->numRecords() , 28u );
+
+ {
+ RecordId loc;
+ auto_ptr<Runner> runner(InternalPlanner::collectionScan(&txn,
+ ns(),
+ collection(),
+ InternalPlanner::FORWARD));
+ runner->getNext(NULL, &loc);
+ ASSERT( first == loc);
+ }
+ {
+ auto_ptr<Runner> runner(InternalPlanner::collectionScan(&txn,
+ ns(),
+ collection(),
+ InternalPlanner::BACKWARD));
+ RecordId loc;
+ runner->getNext(NULL, &loc);
+ ASSERT( last != loc );
+ ASSERT( !last.isNull() );
+ }
+
+ // Too big
+ BSONObjBuilder bob;
+ bob.appendOID("_id", 0, true);
+ bob.append( "a", string( MinExtentSize + 300, 'a' ) );
+ BSONObj bigger = bob.done();
+ StatusWith<RecordId> status = collection()->insertDocument( &txn, bigger, true );
+ ASSERT( !status.isOK() );
+ ASSERT_EQUALS( 0, nRecords() );
+ }
+ public:
+ void run() {
+ // log() << "******** NOT RUNNING TruncateCapped test yet ************" << endl;
+ pass(0);
+ }
+ };
#endif // SERVER-13640
#if 0 // XXXXXX - once RecordStore is clean, we can put this back
- class Migrate : public Base {
- public:
- void run() {
- create();
- nsd()->deletedListEntry( 2 ) = nsd()->cappedListOfAllDeletedRecords().drec()->nextDeleted().drec()->nextDeleted();
- nsd()->cappedListOfAllDeletedRecords().drec()->nextDeleted().drec()->nextDeleted().writing() = RecordId();
- nsd()->cappedLastDelRecLastExtent().Null();
- NamespaceDetails *d = nsd();
-
- zero( &d->capExtent() );
- zero( &d->capFirstNewRecord() );
-
- // this has a side effect of called NamespaceDetails::cappedCheckMigrate
- db()->namespaceIndex().details( ns() );
-
- ASSERT( nsd()->firstExtent() == nsd()->capExtent() );
- ASSERT( nsd()->capExtent().getOfs() != 0 );
- ASSERT( !nsd()->capFirstNewRecord().isValid() );
- int nDeleted = 0;
- for ( RecordId i = nsd()->cappedListOfAllDeletedRecords(); !i.isNull(); i = i.drec()->nextDeleted(), ++nDeleted );
- ASSERT_EQUALS( 10, nDeleted );
- ASSERT( nsd()->cappedLastDelRecLastExtent().isNull() );
- }
- private:
- static void zero( RecordId *d ) {
- memset( d, 0, sizeof( RecordId ) );
- }
- virtual string spec() const {
- return "{\"capped\":true,\"size\":512,\"$nExtents\":10}";
- }
- };
+ class Migrate : public Base {
+ public:
+ void run() {
+ create();
+ nsd()->deletedListEntry( 2 ) =
+ nsd()->cappedListOfAllDeletedRecords().drec()->nextDeleted().drec()->nextDeleted();
+            nsd()->cappedListOfAllDeletedRecords().drec()->nextDeleted().drec()->nextDeleted()
+                .writing() = RecordId();
+            nsd()->cappedLastDelRecLastExtent().Null();
+            NamespaceDetails *d = nsd();
+
+ zero( &d->capExtent() );
+ zero( &d->capFirstNewRecord() );
+
+            // this has a side effect of calling NamespaceDetails::cappedCheckMigrate
+ db()->namespaceIndex().details( ns() );
+
+ ASSERT( nsd()->firstExtent() == nsd()->capExtent() );
+ ASSERT( nsd()->capExtent().getOfs() != 0 );
+ ASSERT( !nsd()->capFirstNewRecord().isValid() );
+ int nDeleted = 0;
+            for ( RecordId i = nsd()->cappedListOfAllDeletedRecords(); !i.isNull();
+                  i = i.drec()->nextDeleted(), ++nDeleted );
+            ASSERT_EQUALS( 10, nDeleted );
+ ASSERT( nsd()->cappedLastDelRecLastExtent().isNull() );
+ }
+ private:
+ static void zero( RecordId *d ) {
+ memset( d, 0, sizeof( RecordId ) );
+ }
+ virtual string spec() const {
+ return "{\"capped\":true,\"size\":512,\"$nExtents\":10}";
+ }
+ };
#endif
// This isn't a particularly useful test, and because it doesn't clean up
@@ -484,41 +484,41 @@ namespace NamespaceDetailsTests {
// };
#if 0 // SERVER-13640
- class SwapIndexEntriesTest : public Base {
- public:
- void run() {
- create();
- NamespaceDetails *nsd = collection()->detailsWritable();
-
- OperationContextImpl txn;
- // Set 2 & 54 as multikey
- nsd->setIndexIsMultikey(&txn, 2, true);
- nsd->setIndexIsMultikey(&txn, 54, true);
- ASSERT(nsd->isMultikey(2));
- ASSERT(nsd->isMultikey(54));
-
- // Flip 2 & 47
- nsd->setIndexIsMultikey(&txn, 2, false);
- nsd->setIndexIsMultikey(&txn, 47, true);
- ASSERT(!nsd->isMultikey(2));
- ASSERT(nsd->isMultikey(47));
-
- // Reset entries that are already true
- nsd->setIndexIsMultikey(&txn, 54, true);
- nsd->setIndexIsMultikey(&txn, 47, true);
- ASSERT(nsd->isMultikey(54));
- ASSERT(nsd->isMultikey(47));
-
- // Two non-multi-key
- nsd->setIndexIsMultikey(&txn, 2, false);
- nsd->setIndexIsMultikey(&txn, 43, false);
- ASSERT(!nsd->isMultikey(2));
- ASSERT(nsd->isMultikey(54));
- ASSERT(nsd->isMultikey(47));
- ASSERT(!nsd->isMultikey(43));
- }
- virtual string spec() const { return "{\"capped\":true,\"size\":512,\"$nExtents\":1}"; }
- };
+ class SwapIndexEntriesTest : public Base {
+ public:
+ void run() {
+ create();
+ NamespaceDetails *nsd = collection()->detailsWritable();
+
+ OperationContextImpl txn;
+ // Set 2 & 54 as multikey
+ nsd->setIndexIsMultikey(&txn, 2, true);
+ nsd->setIndexIsMultikey(&txn, 54, true);
+ ASSERT(nsd->isMultikey(2));
+ ASSERT(nsd->isMultikey(54));
+
+ // Flip 2 & 47
+ nsd->setIndexIsMultikey(&txn, 2, false);
+ nsd->setIndexIsMultikey(&txn, 47, true);
+ ASSERT(!nsd->isMultikey(2));
+ ASSERT(nsd->isMultikey(47));
+
+ // Reset entries that are already true
+ nsd->setIndexIsMultikey(&txn, 54, true);
+ nsd->setIndexIsMultikey(&txn, 47, true);
+ ASSERT(nsd->isMultikey(54));
+ ASSERT(nsd->isMultikey(47));
+
+ // Two non-multi-key
+ nsd->setIndexIsMultikey(&txn, 2, false);
+ nsd->setIndexIsMultikey(&txn, 43, false);
+ ASSERT(!nsd->isMultikey(2));
+ ASSERT(nsd->isMultikey(54));
+ ASSERT(nsd->isMultikey(47));
+ ASSERT(!nsd->isMultikey(43));
+ }
+ virtual string spec() const { return "{\"capped\":true,\"size\":512,\"$nExtents\":1}"; }
+ };
#endif // SERVER-13640
} // namespace NamespaceDetailsTests
diff --git a/src/mongo/dbtests/perftests.cpp b/src/mongo/dbtests/perftests.cpp
index 57872498371..b022b943138 100644
--- a/src/mongo/dbtests/perftests.cpp
+++ b/src/mongo/dbtests/perftests.cpp
@@ -1,7 +1,8 @@
/** @file perftests.cpp : unit tests relating to performance
- The idea herein is tests that run fast and can be part of the normal CI suite. So no tests herein that take
- a long time to run. Obviously we need those too, but they will be separate.
+ The idea herein is tests that run fast and can be part of the normal CI suite. So no
+ tests herein that take a long time to run. Obviously we need those too, but they will be
+ separate.
These tests use DBDirectClient; they are a bit white-boxish.
*/
@@ -218,7 +219,8 @@ protected:
virtual string name() = 0;
- // how long to run test. 0 is a sentinel which means just run the timed() method once and time it.
+ // how long to run test. 0 is a sentinel which means just run the timed() method once and time
+ // it.
virtual int howLongMillis() {
return profiling ? 30000 : 5000;
}
@@ -1252,7 +1254,8 @@ public:
void prep() {
{
- // the checksum code assumes 'standard' rollover on addition overflows. let's check that:
+ // the checksum code assumes 'standard' rollover on addition overflows. let's check
+ // that:
unsigned long long x = 0xffffffffffffffffULL;
ASSERT(x + 2 == 1);
}
@@ -1283,8 +1286,9 @@ public:
((char*&)p)[1]--;
c.gen(p, sz);
ASSERT(c != last);
- ((char*&)p)
- [1]++; // check same data, different order, doesn't give same checksum (different longwords case)
+ // check same data, different order, doesn't give same checksum (different longwords
+ // case)
+ ((char*&)p)[1]++;
((char*&)p)[8]--;
c.gen(p, sz);
ASSERT(c != last);
diff --git a/src/mongo/dbtests/query_stage_and.cpp b/src/mongo/dbtests/query_stage_and.cpp
index ab1ba6196a6..016a67e8eb8 100644
--- a/src/mongo/dbtests/query_stage_and.cpp
+++ b/src/mongo/dbtests/query_stage_and.cpp
@@ -27,8 +27,8 @@
*/
/**
- * This file tests db/exec/and_*.cpp and RecordId invalidation. RecordId invalidation forces a fetch
- * so we cannot test it outside of a dbtest.
+ * This file tests db/exec/and_*.cpp and RecordId invalidation. RecordId invalidation forces a
+ * fetch so we cannot test it outside of a dbtest.
*/
#include <boost/scoped_ptr.hpp>
diff --git a/src/mongo/dbtests/sharding.cpp b/src/mongo/dbtests/sharding.cpp
index e88e3b96144..5d4f76825bb 100644
--- a/src/mongo/dbtests/sharding.cpp
+++ b/src/mongo/dbtests/sharding.cpp
@@ -180,9 +180,9 @@ public:
};
//
-// Tests creating a new chunk manager with random split points. Creating chunks on multiple shards is not
-// tested here since there are unresolved race conditions there and probably should be avoided if at all
-// possible.
+// Tests creating a new chunk manager with random split points. Creating chunks on multiple shards
+// is not tested here since there are unresolved race conditions there and probably should be
+// avoided if at all possible.
//
class ChunkManagerCreateFullTest : public ChunkManagerTest {
public:
@@ -224,7 +224,8 @@ public:
set<int> minorVersions;
OID epoch;
- // Check that all chunks were created with version 1|x with consistent epoch and unique minor versions
+ // Check that all chunks were created with version 1|x with consistent epoch and unique
+ // minor versions
while (cursor->more()) {
BSONObj chunk = cursor->next();
@@ -246,8 +247,8 @@ public:
};
//
-// Tests that chunks are loaded correctly from the db with no a-priori info and also that they can be reloaded
-// on top of an old chunk manager with changes.
+// Tests that chunks are loaded correctly from the db with no a-priori info and also that they can
+// be reloaded on top of an old chunk manager with changes.
//
class ChunkManagerLoadBasicTest : public ChunkManagerCreateFullTest {
public:
@@ -355,7 +356,8 @@ public:
}
};
- // Allow validating with and without ranges (b/c our splits won't actually be updated by the diffs)
+ // Allow validating with and without ranges (b/c our splits won't actually be updated by the
+ // diffs)
void validate(BSONArray chunks, ChunkVersion maxVersion, const VersionMap& maxShardVersions) {
validate(chunks, NULL, maxVersion, maxShardVersions);
}
@@ -386,7 +388,8 @@ public:
chunkCount++;
if (ranges != NULL) {
- // log() << "Validating chunk " << chunkDoc << " size : " << ranges->size() << " vs " << chunkCount << endl;
+ // log() << "Validating chunk " << chunkDoc << " size : " << ranges->size() << " vs
+ // " << chunkCount << endl;
RangeMap::iterator chunkRange =
ranges->find(_inverse ? chunkDoc["max"].Obj() : chunkDoc["min"].Obj());
@@ -586,7 +589,8 @@ public:
chunk[ChunkType::shard()].String())
break;
- // log() << "... appending chunk from diff shard: " << prevShardChunk << endl;
+ // log() << "... appending chunk from diff shard: " << prevShardChunk <<
+ // endl;
newChunksB.append(prevShardChunk);
prevShardChunk = BSONObj();
@@ -616,7 +620,8 @@ public:
BSONObj newShard = newShardB.obj();
BSONObj prevShard = prevShardB.obj();
- // log() << " ... migrated to " << newShard << " and updated " << prevShard << endl;
+ // log() << " ... migrated to " << newShard << " and updated " << prevShard
+ // << endl;
newChunksB.append(newShard);
newChunksB.append(prevShard);
diff --git a/src/mongo/dbtests/threadedtests.cpp b/src/mongo/dbtests/threadedtests.cpp
index 5e1c5d7945d..4730f0dd3a8 100644
--- a/src/mongo/dbtests/threadedtests.cpp
+++ b/src/mongo/dbtests/threadedtests.cpp
@@ -459,8 +459,8 @@ public:
}
};
-// we don't use upgrade so that part is not important currently but the other aspects of this test are
-// interesting; it would be nice to do analogous tests for SimpleRWLock and QLock
+// we don't use upgrade so that part is not important currently but the other aspects of this test
+// are interesting; it would be nice to do analogous tests for SimpleRWLock and QLock
class UpgradableTest : public ThreadedTest<7> {
RWLock m;
@@ -480,7 +480,7 @@ private:
*/
// /-- verify upgrade can be done instantly while in a read lock already
// | /-- verify upgrade acquisition isn't greedy
- // | | /-- verify writes aren't greedy while in upgradable (or are they?)
+    // |        |       /-- verify writes aren't greedy while in upgradable (or are they?)
// v v v
const char* what = " RURuRwR";
@@ -532,8 +532,9 @@ private:
LOG(Z) << x << ' ' << ch << " got " << endl;
if (what[x] == 'R') {
if (t.millis() > 15) {
- // commented out for less chatter, we aren't using upgradeable anyway right now:
- // log() << x << " info: when in upgradable, write locks are still greedy on this platform" << endl;
+                    // commented out for less chatter, we aren't using upgradeable anyway
+                    // right now:
+                    // log() << x << " info: when in upgradable, write locks are still
+                    // greedy on this platform" << endl;
}
}
sleepmillis(200);
@@ -560,8 +561,8 @@ void sleepalittle() {
int once;
/* This test is to see how long it takes to get a lock after there has been contention -- the OS
- will need to reschedule us. if a spinlock, it will be fast of course, but these aren't spin locks.
- Experimenting with different # of threads would be a good idea.
+ will need to reschedule us. if a spinlock, it will be fast of course, but these aren't spin
+ locks. Experimenting with different # of threads would be a good idea.
*/
template <class whichmutex, class scoped>
class Slack : public ThreadedTest<17> {
@@ -720,8 +721,8 @@ private:
};
-// Tests waiting on the TicketHolder by running many more threads than can fit into the "hotel", but only
-// max _nRooms threads should ever get in at once
+// Tests waiting on the TicketHolder by running many more threads than can fit into the "hotel", but
+// only max _nRooms threads should ever get in at once
class TicketHolderWaits : public ThreadedTest<10> {
static const int checkIns = 1000;
static const int rooms = 3;
@@ -781,8 +782,8 @@ private:
}
virtual void validate() {
- // This should always be true, assuming that it takes < 1 sec for the hardware to process a check-out/check-in
- // Time for test is then ~ #threads / _nRooms * 2 seconds
+ // This should always be true, assuming that it takes < 1 sec for the hardware to process a
+        // check-out/check-in. Time for test is then ~ #threads / _nRooms * 2 seconds.
verify(_hotel._maxRooms == _hotel._nRooms);
}
};
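
The "hotel" comment above describes TicketHolder as a counting-semaphore admission gate. A self-contained sketch of that pattern follows; all names here are assumed for illustration, not MongoDB's actual TicketHolder API.

    #include <condition_variable>
    #include <mutex>

    class TicketGate {
    public:
        explicit TicketGate(int rooms) : _available(rooms) {}
        void checkIn() {  // blocks until one of the `rooms` slots frees up
            std::unique_lock<std::mutex> lk(_m);
            _cv.wait(lk, [this] { return _available > 0; });
            --_available;
        }
        void checkOut() {
            { std::lock_guard<std::mutex> lk(_m); ++_available; }
            _cv.notify_one();
        }
    private:
        std::mutex _m;
        std::condition_variable _cv;
        int _available;  // never exceeds the initial `rooms`, matching the test's invariant
    };
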
diff --git a/src/mongo/s/balance.cpp b/src/mongo/s/balance.cpp
index fd78d6195fe..ed513ee87d5 100644
--- a/src/mongo/s/balance.cpp
+++ b/src/mongo/s/balance.cpp
@@ -511,8 +511,8 @@ bool Balancer::_init() {
// contact the config server and refresh shard information
// checks that each shard is indeed a different process (no hostname mixup)
- // these checks are redundant in that they're redone at every new round but we want to do them initially here
- // so to catch any problem soon
+ // these checks are redundant in that they're redone at every new round but we want to do
+    // them initially here so as to catch any problems soon
Shard::reloadShardInfo();
_checkOIDs();
@@ -536,7 +536,8 @@ bool Balancer::_init() {
}
void Balancer::run() {
- // this is the body of a BackgroundJob so if we throw here we're basically ending the balancer thread prematurely
+ // this is the body of a BackgroundJob so if we throw here we're basically ending the balancer
+ // thread prematurely
while (!inShutdown()) {
if (!_init()) {
log() << "will retry to initialize balancer in one minute" << endl;
diff --git a/src/mongo/s/balance.h b/src/mongo/s/balance.h
index 8b47ef500ae..5a5c4312685 100644
--- a/src/mongo/s/balance.h
+++ b/src/mongo/s/balance.h
@@ -44,13 +44,14 @@ namespace mongo {
struct WriteConcernOptions;
/**
- * The balancer is a background task that tries to keep the number of chunks across all servers of the cluster even. Although
- * every mongos will have one balancer running, only one of them will be active at the any given point in time. The balancer
- * uses a 'DistributedLock' for that coordination.
+ * The balancer is a background task that tries to keep the number of chunks across all servers of
+ * the cluster even. Although every mongos will have one balancer running, only one of them will be
+ * active at any given point in time. The balancer uses a 'DistributedLock' for that
+ * coordination.
*
- * The balancer does act continuously but in "rounds". At a given round, it would decide if there is an imbalance by
- * checking the difference in chunks between the most and least loaded shards. It would issue a request for a chunk
- * migration per round, if it found so.
+ * The balancer does act continuously but in "rounds". At a given round, it would decide if there is
+ * an imbalance by checking the difference in chunks between the most and least loaded shards. It
+ * would issue a request for a chunk migration per round, if it finds one.
*/
class Balancer : public BackgroundJob {
public:
@@ -91,11 +92,12 @@ private:
bool _init();
/**
- * Gathers all the necessary information about shards and chunks, and decides whether there are candidate chunks to
- * be moved.
+ * Gathers all the necessary information about shards and chunks, and decides whether there are
+ * candidate chunks to be moved.
*
* @param conn is the connection with the config server(s)
- * @param candidateChunks (IN/OUT) filled with candidate chunks, one per collection, that could possibly be moved
+ * @param candidateChunks (IN/OUT) filled with candidate chunks, one per collection, that could
+ * possibly be moved
*/
void _doBalanceRound(DBClientBase& conn, std::vector<CandidateChunkPtr>* candidateChunks);
@@ -117,7 +119,8 @@ private:
void _ping(bool waiting = false);
/**
- * @return true if all the servers listed in configdb as being shards are reachable and are distinct processes
+ * @return true if all the servers listed in configdb as being shards are reachable and are
+ * distinct processes
*/
bool _checkOIDs();
};
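
The class comment above reduces to a small decision rule per round: compare the most and least loaded shards and suggest at most one migration. A runnable sketch of that rule, with the threshold parameter and all names assumed for illustration:

    #include <algorithm>
    #include <utility>
    #include <vector>

    // Given chunk counts per shard (assumed non-empty), return the (from, to) shard
    // indices of one suggested migration, or (-1, -1) when within the threshold.
    std::pair<int, int> suggestOneMigration(const std::vector<int>& chunksPerShard,
                                            int imbalanceThreshold) {
        int from = static_cast<int>(std::max_element(chunksPerShard.begin(),
                                                     chunksPerShard.end()) -
                                    chunksPerShard.begin());
        int to = static_cast<int>(std::min_element(chunksPerShard.begin(),
                                                   chunksPerShard.end()) -
                                  chunksPerShard.begin());
        if (chunksPerShard[from] - chunksPerShard[to] > imbalanceThreshold)
            return std::make_pair(from, to);  // one migration request per round
        return std::make_pair(-1, -1);
    }
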
diff --git a/src/mongo/s/chunk.cpp b/src/mongo/s/chunk.cpp
index 77f2331a230..f0028d1d385 100644
--- a/src/mongo/s/chunk.cpp
+++ b/src/mongo/s/chunk.cpp
@@ -598,13 +598,15 @@ bool Chunk::splitIfShould(long dataWritten) const {
Status status = split(Chunk::autoSplitInternal, &splitCount, &res);
if (!status.isOK()) {
// split would have issued a message if we got here
- _dataWritten =
- 0; // this means there wasn't enough data to split, so don't want to try again until considerable more data
+            // this means there wasn't enough data to split, so we don't want to
+            // try again until considerably more data has been written
+ _dataWritten = 0;
return false;
}
if (maxIsInf() || minIsInf()) {
- // we don't want to reset _dataWritten since we kind of want to check the other side right away
+ // we don't want to reset _dataWritten since we kind of want to
+ // check the other side right away
} else {
_dataWritten = 0; // we're splitting, so should wait a bit
}
@@ -798,9 +800,9 @@ ChunkManager::ChunkManager(const BSONObj& collDoc)
_unique(collDoc[CollectionType::unique()].trueValue()),
_chunkRanges(),
_mutex("ChunkManager"),
- // The shard versioning mechanism hinges on keeping track of the number of times we reloaded ChunkManager's.
- // Increasing this number here will prompt checkShardVersion() to refresh the connection-level versions to
- // the most up to date value.
+ // The shard versioning mechanism hinges on keeping track of the number of times we reloaded
+    // ChunkManagers. Increasing this number here will prompt checkShardVersion() to refresh the
+    // connection-level versions to the most up-to-date value.
_sequenceNumber(NextSequenceNumber.addAndFetch(1)) {
//
// Sets up a chunk manager from an existing sharded collection document
@@ -1587,8 +1589,8 @@ int ChunkManager::getCurrentDesiredChunkSize() const {
}
// NOTE (careful when deprecating)
-// currently the sharding is enabled because of a write or read (as opposed to a split or migrate), the shard learns
-// its name and through the 'setShardVersion' command call
+// currently, when sharding is enabled because of a write or read (as opposed to a split or
+// migrate), the shard learns its name through the 'setShardVersion' command call
bool setShardVersion(DBClientBase& conn,
const string& ns,
ChunkVersion version,
diff --git a/src/mongo/s/chunk.h b/src/mongo/s/chunk.h
index 488c691f412..1bde1d67f8b 100644
--- a/src/mongo/s/chunk.h
+++ b/src/mongo/s/chunk.h
@@ -157,7 +157,8 @@ public:
bool splitIfShould(long dataWritten) const;
/**
- * Splits this chunk at a non-specificed split key to be chosen by the mongod holding this chunk.
+     * Splits this chunk at a non-specified split key to be chosen by the mongod holding this
+ * chunk.
*
* @param mode
* @param res the object containing details about the split execution
@@ -316,7 +317,8 @@ private:
*/
void determineSplitPoints(bool atMedian, std::vector<BSONObj>* splitPoints) const;
- /** initializes _dataWritten with a random value so that a mongos restart wouldn't cause delay in splitting */
+ /** initializes _dataWritten with a random value so that a mongos restart wouldn't cause delay
+ * in splitting */
static int mkDataWritten();
};
@@ -440,7 +442,8 @@ public:
}
/**
- * this is just an increasing number of how many ChunkManagers we have so we know if something has been updated
+ * this is just an increasing number of how many ChunkManagers we have so we know if something
+ * has been updated
*/
unsigned long long getSequenceNumber() const {
return _sequenceNumber;
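
A sketch of the reload-counting scheme those comments describe: constructing a new ChunkManager bumps a global counter, and any connection whose cached number lags knows its shard versions are stale. Names here are illustrative, not the real types.

    #include <atomic>

    std::atomic<unsigned long long> nextSequenceNumber(0);

    struct ManagerSketch {
        unsigned long long sequenceNumber;
        // mirrors `_sequenceNumber(NextSequenceNumber.addAndFetch(1))` above
        ManagerSketch() : sequenceNumber(++nextSequenceNumber) {}
    };

    // a connection remembers the last sequence number it synced against; any
    // mismatch means checkShardVersion() should refresh connection-level versions
    bool connectionIsStale(unsigned long long lastSeen, const ManagerSketch& cm) {
        return lastSeen != cm.sequenceNumber;
    }
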
diff --git a/src/mongo/s/chunk_diff-inl.h b/src/mongo/s/chunk_diff-inl.h
index 0c89d417000..bd2bc07f529 100644
--- a/src/mongo/s/chunk_diff-inl.h
+++ b/src/mongo/s/chunk_diff-inl.h
@@ -180,7 +180,8 @@ int ConfigDiffTracker<ValType, ShardType>::calculateConfigDiff(
diffChunkDoc[ChunkType::max()].Obj());
// Figure out which of the new chunks we need to track
- // Important - we need to actually own this doc, in case the cursor decides to getMore or unbuffer
+ // Important - we need to actually own this doc, in case the cursor decides to getMore or
+ // unbuffer
if (isTracked(diffChunkDoc))
newTracked.push_back(diffChunkDoc.getOwned());
}
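
The getOwned() in that hunk is the standard defense against cursor buffer reuse: a BSONObj handed out by a cursor may point into memory the cursor recycles on the next getMore. A minimal sketch of the idiom, assuming the usual BSONObj ownership semantics from the bson headers:

    // copy the underlying bytes so the document survives the cursor's buffer;
    // getOwned() is a no-op if the object already owns its storage
    BSONObj keepPastCursorLifetime(const BSONObj& fromCursor) {
        return fromCursor.getOwned();
    }
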
diff --git a/src/mongo/s/commands_admin.cpp b/src/mongo/s/commands_admin.cpp
index bbc7b689f28..7f41e776cff 100644
--- a/src/mongo/s/commands_admin.cpp
+++ b/src/mongo/s/commands_admin.cpp
@@ -1311,7 +1311,8 @@ public:
}
} listShardsCmd;
-/* a shard is a single mongod server or a replica pair. add it (them) to the cluster as a storage partition. */
+/* a shard is a single mongod server or a replica pair. add it (them) to the cluster as a storage
+ * partition. */
class AddShard : public GridAdminCmd {
public:
AddShard() : GridAdminCmd("addShard") {}
@@ -1342,7 +1343,8 @@ public:
return false;
}
- // using localhost in server names implies every other process must use localhost addresses too
+ // using localhost in server names implies every other process must use localhost addresses
+ // too
vector<HostAndPort> serverAddrs = servers.getServers();
for (size_t i = 0; i < serverAddrs.size(); i++) {
if (serverAddrs[i].isLocalHost() != grid.allowLocalHost()) {
diff --git a/src/mongo/s/commands_public.cpp b/src/mongo/s/commands_public.cpp
index dba97b5a7e6..19c4a169cef 100644
--- a/src/mongo/s/commands_public.cpp
+++ b/src/mongo/s/commands_public.cpp
@@ -654,7 +654,8 @@ public:
}
}
- // result.appendNumber( "collections" , ncollections ); //TODO: need to find a good way to get this
+ // result.appendNumber( "collections" , ncollections ); //TODO: need to find a good way to
+ // get this
output.appendNumber("objects", objects);
/* avgObjSize on mongod is not scaled based on the argument to db.stats(), so we use
* unscaledDataSize here for consistency. See SERVER-7347. */
@@ -1735,7 +1736,8 @@ public:
verify(0);
}
- // We could support arbitrary shard keys by sending commands to all shards but I don't think we should
+ // We could support arbitrary shard keys by sending commands to all shards but I don't think
+ // we should
errmsg =
"GridFS fs.chunks collection must be sharded on either {files_id:1} or {files_id:1, "
"n:1}";
@@ -2037,7 +2039,8 @@ public:
uassert(15920,
"Cannot output to a non-sharded collection, a sharded collection exists",
!confOut->isSharded(finalColLong));
- // should we also prevent going from non-sharded to sharded? during the transition client may see partial data
+ // should we also prevent going from non-sharded to sharded? during the transition client
+ // may see partial data
long long maxChunkSizeBytes = 0;
if (shardedOutput) {
@@ -2102,14 +2105,15 @@ public:
dlk = dist_lock_try( &lockSetup , (string)"mr-parallel" );
if ( ! dlk.got() ) {
if ( ++tryc % 100 == 0 )
- warning() << "the collection metadata could not be locked for mapreduce, already locked by " << dlk.other() << endl;
- sleepmillis(100);
+                        warning() << "the collection metadata could not be locked for "
+                            << "mapreduce, already locked by " << dlk.other() << endl;
+                    sleepmillis(100);
}
}
}
catch( LockException& e ){
- errmsg = str::stream() << "error locking distributed lock for mapreduce " << causedBy( e );
- return false;
+                errmsg = str::stream() << "error locking distributed lock for mapreduce "
+                    << causedBy( e );
+                return false;
}
}
*/
diff --git a/src/mongo/s/config.cpp b/src/mongo/s/config.cpp
index 3c623253970..a4c16dc405b 100644
--- a/src/mongo/s/config.cpp
+++ b/src/mongo/s/config.cpp
@@ -248,7 +248,8 @@ ChunkManagerPtr DBConfig::shardCollection(const string& ns,
}
    // Tell the primary mongod to refresh its data
- // TODO: Think the real fix here is for mongos to just assume all collections sharded, when we get there
+ // TODO: Think the real fix here is for mongos to just assume all collections sharded, when we
+ // get there
for (int i = 0; i < 4; i++) {
if (i == 3) {
warning() << "too many tries updating initial version of " << ns << " on shard primary "
@@ -308,8 +309,8 @@ void DBConfig::getChunkManagerOrPrimary(const string& ns,
ChunkManagerPtr& manager,
ShardPtr& primary) {
// The logic here is basically that at any time, our collection can become sharded or unsharded
- // via a command. If we're not sharded, we want to send data to the primary, if sharded, we want
- // to send data to the correct chunks, and we can't check both w/o the lock.
+ // via a command. If we're not sharded, we want to send data to the primary, if sharded, we
+ // want to send data to the correct chunks, and we can't check both w/o the lock.
manager.reset();
primary.reset();
@@ -326,9 +327,10 @@ void DBConfig::getChunkManagerOrPrimary(const string& ns,
} else {
CollectionInfo& cInfo = i->second;
- // TODO: we need to be careful about handling shardingEnabled, b/c in some places we seem to use and
- // some we don't. If we use this function in combination with just getChunkManager() on a slightly
- // borked config db, we'll get lots of staleconfig retries
+ // TODO: we need to be careful about handling shardingEnabled, b/c in some places we
+ // seem to use and some we don't. If we use this function in combination with just
+ // getChunkManager() on a slightly borked config db, we'll get lots of staleconfig
+ // retries
if (_shardingEnabled && cInfo.isSharded()) {
manager = cInfo.getCM();
} else {
@@ -388,7 +390,8 @@ ChunkManagerPtr DBConfig::getChunkManager(const string& ns, bool shouldReload, b
verify(!key.isEmpty());
// TODO: We need to keep this first one-chunk check in until we have a more efficient way of
- // creating/reusing a chunk manager, as doing so requires copying the full set of chunks currently
+ // creating/reusing a chunk manager, as doing so requires copying the full set of chunks
+ // currently
BSONObj newest;
if (oldVersion.isSet() && !forceReload) {
@@ -470,9 +473,9 @@ ChunkManagerPtr DBConfig::getChunkManager(const string& ns, bool shouldReload, b
//
// LEGACY BEHAVIOR
- // It's possible to get into a state when dropping collections when our new version is less than our prev
- // version. Behave identically to legacy mongos, for now, and warn to draw attention to the problem.
- // TODO: Assert in next version, to allow smooth upgrades
+ // It's possible to get into a state when dropping collections when our new version is less than
+ // our prev version. Behave identically to legacy mongos, for now, and warn to draw attention
+ // to the problem. TODO: Assert in next version, to allow smooth upgrades
//
if (shouldReset && temp->getVersion() < ci.getCM()->getVersion()) {
@@ -512,9 +515,10 @@ void DBConfig::unserialize(const BSONObj& from) {
_shardingEnabled = from.getBoolField(DatabaseType::DEPRECATED_partitioned().c_str());
_primary.reset(from.getStringField(DatabaseType::primary().c_str()));
- // In the 1.5.x series, we used to have collection metadata nested in the database entry. The 1.6.x series
- // had migration code that ported that info to where it belongs now: the 'collections' collection. We now
- // just assert that we're not migrating from a 1.5.x directly into a 1.7.x without first converting.
+ // In the 1.5.x series, we used to have collection metadata nested in the database entry. The
+ // 1.6.x series had migration code that ported that info to where it belongs now: the
+ // 'collections' collection. We now just assert that we're not migrating from a 1.5.x directly
+ // into a 1.7.x without first converting.
BSONObj sharded = from.getObjectField(DatabaseType::DEPRECATED_sharded().c_str());
if (!sharded.isEmpty())
uasserted(
@@ -1241,7 +1245,8 @@ void ConfigServer::logChange(const string& what, const string& ns, const BSONObj
}
catch (std::exception& e) {
- // if we got here, it means the config change is only in the log; it didn't make it to config.changelog
+ // if we got here, it means the config change is only in the log; it didn't make it to
+ // config.changelog
log() << "not logging config change: " << changeID << " " << e.what() << endl;
}
}
diff --git a/src/mongo/s/config.h b/src/mongo/s/config.h
index ec74ae9374f..dd131fd5d30 100644
--- a/src/mongo/s/config.h
+++ b/src/mongo/s/config.h
@@ -215,8 +215,10 @@ protected:
Shard _primary; // e.g. localhost , mongo.foo.com:9999
bool _shardingEnabled;
- // map<std::string,CollectionInfo> _sharded; // { "alleyinsider.blog.posts" : { ts : 1 } , ... ] - all ns that are sharded
- // map<std::string,ChunkManagerPtr> _shards; // this will only have entries for things that have been looked at
+ // { "alleyinsider.blog.posts" : { ts : 1 } , ... ] - all ns that are sharded
+ // map<std::string,CollectionInfo> _sharded;
+ // this will only have entries for things that have been looked at
+ // map<std::string,ChunkManagerPtr> _shards;
Collections _collections;
diff --git a/src/mongo/s/d_migrate.cpp b/src/mongo/s/d_migrate.cpp
index 13c6beae0a2..8ec1b1e219c 100644
--- a/src/mongo/s/d_migrate.cpp
+++ b/src/mongo/s/d_migrate.cpp
@@ -523,7 +523,8 @@ public:
// use the average object size to estimate how many objects a full chunk would carry
// do that while traversing the chunk's range using the sharding index, below
- // there's a fair amount of slack before we determine a chunk is too large because object sizes will vary
+ // there's a fair amount of slack before we determine a chunk is too
+ // large because object sizes will vary
unsigned long long maxRecsWhenFull;
long long avgRecSize;
const long long totalRecs = collection->numRecords(txn);
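
That estimate boils down to one division plus the slack the comment mentions. A compressed sketch, where the 30% slack factor is an assumption standing in for whatever margin the real code applies:

    // estimate how many records a full chunk could hold from the average object
    // size, with slack because object sizes vary
    unsigned long long estimateMaxRecsWhenFull(long long dataSize,
                                               long long totalRecs,
                                               long long maxChunkSize) {
        if (totalRecs <= 0)
            return 0;
        const long long avgRecSize = dataSize / totalRecs;
        if (avgRecSize <= 0)
            return 0;
        return static_cast<unsigned long long>(1.3 * maxChunkSize / avgRecSize);
    }
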
@@ -950,7 +951,8 @@ public:
}
} initialCloneCommand;
-// Tests can pause / resume moveChunk's progress at each step by enabling / disabling each fail point.
+// Tests can pause / resume moveChunk's progress at each step by enabling /
+// disabling each fail point.
MONGO_FP_DECLARE(moveChunkHangAtStep1);
MONGO_FP_DECLARE(moveChunkHangAtStep2);
MONGO_FP_DECLARE(moveChunkHangAtStep3);
@@ -1464,8 +1466,8 @@ public:
log() << "moveChunk setting version to: " << myVersion << migrateLog;
// 5.b
- // we're under the collection lock here, too, so we can undo the chunk donation because no other state change
- // could be ongoing
+ // we're under the collection lock here, too, so we can undo the
+ // chunk donation because no other state change could be ongoing
BSONObj res;
bool ok;
@@ -1509,12 +1511,13 @@ public:
// 5.c
// version at which the next highest lastmod will be set
- // if the chunk being moved is the last in the shard, nextVersion is that chunk's lastmod
- // otherwise the highest version is from the chunk being bumped on the FROM-shard
+ // if the chunk being moved is the last in the shard, nextVersion is that chunk's
+            // lastmod; otherwise the highest version is from the chunk being bumped on the
+ // FROM-shard
ChunkVersion nextVersion;
- // we want to go only once to the configDB but perhaps change two chunks, the one being migrated and another
- // local one (so to bump version for the entire shard)
+ // we want to go only once to the configDB but perhaps change two chunks, the one being
+ // migrated and another local one (so to bump version for the entire shard)
// we use the 'applyOps' mechanism to group the two updates and make them safer
// TODO pull config update code to a module
@@ -1673,8 +1676,9 @@ public:
// this could be a blip in the connectivity
// wait out a few seconds and check if the commit request made it
//
- // if the commit made it to the config, we'll see the chunk in the new shard and there's no action
- // if the commit did not make it, currently the only way to fix this state is to bounce the mongod so
+            // if the commit made it to the config, we'll see the chunk in the new shard and
+            // there's no action
+            // if the commit did not make it, currently the only way to fix this state is to
+            // bounce the mongod so
// that the old state (before migrating) be brought in
warning() << "moveChunk commit outcome ongoing: " << cmd
@@ -2182,7 +2186,8 @@ public:
MONGO_FP_PAUSE_WHILE(migrateThreadHangAtStep3);
}
- // if running on a replicated system, we'll need to flush the docs we cloned to the secondaries
+ // if running on a replicated system, we'll need to flush the docs we cloned to the
+ // secondaries
ReplTime lastOpApplied = txn->getClient()->getLastOp().asDate();
{
diff --git a/src/mongo/s/d_split.cpp b/src/mongo/s/d_split.cpp
index ba1022f0513..ccb5d5e0c1d 100644
--- a/src/mongo/s/d_split.cpp
+++ b/src/mongo/s/d_split.cpp
@@ -282,8 +282,9 @@ public:
BSONObjBuilder& result,
bool fromRepl) {
//
- // 1.a We'll parse the parameters in two steps. First, make sure the we can use the split index to get
- // a good approximation of the size of the chunk -- without needing to access the actual data.
+        // 1.a We'll parse the parameters in two steps. First, make sure that we
+ // can use the split index to get a good approximation of the size of
+ // the chunk -- without needing to access the actual data.
//
const std::string ns = parseNs(dbname, jsobj);
@@ -294,7 +295,8 @@ public:
return false;
}
- // If min and max are not provided use the "minKey" and "maxKey" for the sharding key pattern.
+ // If min and max are not provided use the "minKey" and "maxKey" for
+ // the sharding key pattern.
BSONObj min = jsobj.getObjectField("min");
BSONObj max = jsobj.getObjectField("max");
if (min.isEmpty() != max.isEmpty()) {
@@ -350,12 +352,12 @@ public:
const long long dataSize = collection->dataSize(txn);
//
- // 1.b Now that we have the size estimate, go over the remaining parameters and apply any maximum size
- // restrictions specified there.
+ // 1.b Now that we have the size estimate, go over the remaining parameters and apply
+ // any maximum size restrictions specified there.
//
- // 'force'-ing a split is equivalent to having maxChunkSize be the size of the current chunk, i.e., the
- // logic below will split that chunk in half
+ // 'force'-ing a split is equivalent to having maxChunkSize be the size of the current
+ // chunk, i.e., the logic below will split that chunk in half
long long maxChunkSize = 0;
bool forceMedianSplit = false;
{
@@ -377,8 +379,8 @@ public:
}
}
- // We need a maximum size for the chunk, unless we're not actually capable of finding any
- // split points.
+ // We need a maximum size for the chunk, unless we're not actually capable of
+ // finding any split points.
if (maxChunkSize <= 0 && recCount != 0) {
errmsg =
"need to specify the desired max chunk size (maxChunkSize or "
@@ -398,9 +400,9 @@ public:
log() << "request split points lookup for chunk " << ns << " " << min << " -->> " << max
<< endl;
- // We'll use the average object size and number of object to find approximately how many keys
- // each chunk should have. We'll split at half the maxChunkSize or maxChunkObjects, if
- // provided.
+        // We'll use the average object size and number of objects to find approximately how many
+ // keys each chunk should have. We'll split at half the maxChunkSize or maxChunkObjects,
+ // if provided.
const long long avgRecSize = dataSize / recCount;
long long keyCount = maxChunkSize / (2 * avgRecSize);
if (maxChunkObjects && (maxChunkObjects < keyCount)) {
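
Worked example of that computation, with assumed inputs: 64 MB of data across 131072 records gives avgRecSize = 512 bytes, and a 1 MB maxChunkSize then yields 1048576 / (2 * 512) = 1024 keys between split points.

    const long long dataSize     = 64LL * 1024 * 1024;  // assumed
    const long long recCount     = 131072;              // assumed
    const long long maxChunkSize = 1LL * 1024 * 1024;   // assumed

    const long long avgRecSize = dataSize / recCount;      // 512 bytes
    long long keyCount = maxChunkSize / (2 * avgRecSize);  // 1024 keys
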
@@ -429,9 +431,9 @@ public:
return false;
}
- // Use every 'keyCount'-th key as a split point. We add the initial key as a sentinel, to be removed
- // at the end. If a key appears more times than entries allowed on a chunk, we issue a warning and
- // split on the following key.
+ // Use every 'keyCount'-th key as a split point. We add the initial key as a sentinel,
+ // to be removed at the end. If a key appears more times than entries allowed on a
+ // chunk, we issue a warning and split on the following key.
set<BSONObj> tooFrequentKeys;
splitKeys.push_back(
prettyKey(idx->keyPattern(), currKey.getOwned()).extractFields(keyPattern));
@@ -444,7 +446,8 @@ public:
if (currCount > keyCount && !forceMedianSplit) {
currKey = prettyKey(idx->keyPattern(), currKey.getOwned())
.extractFields(keyPattern);
- // Do not use this split key if it is the same used in the previous split point.
+ // Do not use this split key if it is the same used in the previous split
+ // point.
if (currKey.woCompare(splitKeys.back()) == 0) {
tooFrequentKeys.insert(currKey.getOwned());
} else {
@@ -488,8 +491,8 @@ public:
}
//
- // 3. Format the result and issue any warnings about the data we gathered while traversing the
- // index
+ // 3. Format the result and issue any warnings about the data we gathered while
+ // traversing the index
//
// Warn for keys that are more numerous than maxChunkSize allows.
@@ -720,7 +723,8 @@ public:
log() << "splitChunk accepted at version " << shardVersion;
//
- // 3. create the batch of updates to metadata ( the new chunks ) to be applied via 'applyOps' command
+ // 3. create the batch of updates to metadata ( the new chunks ) to be applied via
+ // 'applyOps' command
//
BSONObjBuilder logDetail;
diff --git a/src/mongo/s/d_state.h b/src/mongo/s/d_state.h
index c954f73f425..d798d17deb7 100644
--- a/src/mongo/s/d_state.h
+++ b/src/mongo/s/d_state.h
@@ -327,8 +327,8 @@ public:
}
private:
- bool
- _forceVersionOk; // if this is true, then chunk version #s aren't check, and all ops are allowed
+    // if this is true, then chunk version #s aren't checked, and all ops are allowed
+ bool _forceVersionOk;
typedef std::map<std::string, ChunkVersion> NSVersionMap;
NSVersionMap _versions;
diff --git a/src/mongo/s/distlock.cpp b/src/mongo/s/distlock.cpp
index 1e1d3904014..079f356abd5 100644
--- a/src/mongo/s/distlock.cpp
+++ b/src/mongo/s/distlock.cpp
@@ -229,8 +229,8 @@ public:
BSON(LocksType::lockID(*i)),
BSON("$set" << BSON(LocksType::state(0))));
- // Either the update went through or it didn't, either way we're done trying to
- // unlock
+ // Either the update went through or it didn't, either way we're done trying
+ // to unlock
LOG(DistributedLock::logLvl - 1)
<< "handled late remove of old distributed lock with ts " << *i << endl;
removed = true;
@@ -368,8 +368,8 @@ private:
} distLockPinger;
/**
- * Create a new distributed lock, potentially with a custom sleep and takeover time. If a custom sleep time is
- * specified (time between pings)
+ * Create a new distributed lock, potentially with a custom sleep and takeover time. If a custom
+ * sleep time is specified (time between pings)
*/
DistributedLock::DistributedLock(const ConnectionString& conn,
const string& name,
@@ -418,8 +418,9 @@ const string& DistributedLock::getProcessId() {
}
/**
- * Returns the remote time as reported by the cluster or server. The maximum difference between the reported time
- * and the actual time on the remote server (at the completion of the function) is the maxNetSkew
+ * Returns the remote time as reported by the cluster or server. The maximum difference between the
+ * reported time and the actual time on the remote server (at the completion of the function) is
+ * the maxNetSkew.
*/
Date_t DistributedLock::remoteTime(const ConnectionString& cluster, unsigned long long maxNetSkew) {
ConnectionString server(*cluster.getServers().begin());
@@ -460,8 +461,8 @@ Date_t DistributedLock::remoteTime(const ConnectionString& cluster, unsigned lon
13647);
}
- // Make sure that our delay is not more than 2x our maximum network skew, since this is the max our remote
- // time value can be off by if we assume a response in the middle of the delay.
+ // Make sure that our delay is not more than 2x our maximum network skew, since this is the max
+ // our remote time value can be off by if we assume a response in the middle of the delay.
if (delay > (long long)(maxNetSkew * 2)) {
throw TimeNotFoundException(
str::stream() << "server " << server.toString() << " in cluster " << cluster.toString()
@@ -490,7 +491,8 @@ bool DistributedLock::checkSkew(const ConnectionString& cluster,
if (i == 0)
avgSkews.push_back(0);
- // Could check if this is self, but shouldn't matter since local network connection should be fast.
+ // Could check if this is self, but shouldn't matter since local network connection
+ // should be fast.
ConnectionString server(*si);
vector<long long> skew;
@@ -605,11 +607,11 @@ static void logErrMsgOrWarn(const StringData& messagePrefix,
}
}
-// Semantics of this method are basically that if the lock cannot be acquired, returns false, can be retried.
-// If the lock should not be tried again (some unexpected error) a LockException is thrown.
+// Semantics of this method are basically that if the lock cannot be acquired, returns false, can be
+// retried. If the lock should not be tried again (some unexpected error) a LockException is thrown.
// If we are only trying to re-enter a currently held lock, reenter should be true.
-// Note: reenter doesn't actually make this lock re-entrant in the normal sense, since it can still only
-// be unlocked once, instead it is used to verify that the lock is already held.
+// Note: reenter doesn't actually make this lock re-entrant in the normal sense, since it can still
+// only be unlocked once, instead it is used to verify that the lock is already held.
bool DistributedLock::lock_try(const string& why, bool reenter, BSONObj* other, double timeout) {
// TODO: Start pinging only when we actually get the lock?
// If we don't have a thread pinger, make sure we shouldn't have one
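
Given those semantics (false means the lock can be retried, a LockException means stop), a plausible caller loop looks like this sketch; the retry count, backoff, and timeout argument are assumptions, not values taken from the callers in this tree:

    bool acquireWithRetries(DistributedLock& lock, int maxAttempts) {
        BSONObj other;
        for (int attempt = 0; attempt < maxAttempts; ++attempt) {
            try {
                if (lock.lock_try("sketch", /*reenter=*/false, &other, 0.0))
                    return true;   // acquired
            }
            catch (const LockException&) {
                return false;      // unexpected error: per the contract, don't retry
            }
            sleepmillis(500);      // assumed backoff between attempts
        }
        return false;
    }
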
@@ -674,8 +676,8 @@ bool DistributedLock::lock_try(const string& why, bool reenter, BSONObj* other,
<< " scheduled for late unlock" << endl;
}
- // reset since we've been bounced by a previous lock not being where we thought it was,
- // and should go through full forcing process if required.
+ // reset since we've been bounced by a previous lock not being where we thought it
+ // was, and should go through full forcing process if required.
// (in theory we should never see a ping here if used correctly)
*other = o;
other->getOwned();
@@ -688,7 +690,8 @@ bool DistributedLock::lock_try(const string& why, bool reenter, BSONObj* other,
LockpingsType::ConfigNS, o[LocksType::process()].wrap(LockpingsType::process()));
if (lastPing.isEmpty()) {
LOG(logLvl) << "empty ping found for process in lock '" << lockName << "'" << endl;
- // TODO: Using 0 as a "no time found" value Will fail if dates roll over, but then, so will a lot.
+            // TODO: Using 0 as a "no time found" value will fail if dates roll over, but then,
+ // so will a lot.
lastPing = BSON(LockpingsType::process(o[LocksType::process()].String())
<< LockpingsType::ping((Date_t)0));
}
@@ -730,7 +733,8 @@ bool DistributedLock::lock_try(const string& why, bool reenter, BSONObj* other,
} catch (LockException& e) {
// Remote server cannot be found / is not responsive
warning() << "Could not get remote time from " << _conn << causedBy(e);
- // If our config server is having issues, forget all the pings until we can see it again
+ // If our config server is having issues, forget all the pings until we can see it
+ // again
resetLastPing();
}
@@ -758,8 +762,8 @@ bool DistributedLock::lock_try(const string& why, bool reenter, BSONObj* other,
if (elapsed > takeover) {
            // Lock may be forced; reset our timer whether it succeeds or fails
- // Ensures that another timeout must happen if something borks up here, and resets our pristine
- // ping state if acquired.
+ // Ensures that another timeout must happen if something borks up here, and resets
+ // our pristine ping state if acquired.
resetLastPing();
try {
@@ -785,7 +789,8 @@ bool DistributedLock::lock_try(const string& why, bool reenter, BSONObj* other,
BSONObj err = conn->getLastErrorDetailed();
string errMsg = DBClientWithCommands::getLastErrorString(err);
- // TODO: Clean up all the extra code to exit this method, probably with a refactor
+ // TODO: Clean up all the extra code to exit this method, probably with a
+ // refactor
if (!errMsg.empty() || !err["n"].type() || err["n"].numberInt() < 1) {
logErrMsgOrWarn(
"Could not force lock", lockName, errMsg, "(another force won");
@@ -796,8 +801,8 @@ bool DistributedLock::lock_try(const string& why, bool reenter, BSONObj* other,
}
} catch (UpdateNotTheSame&) {
- // Ok to continue since we know we forced at least one lock document, and all lock docs
- // are required for a lock to be held.
+ // Ok to continue since we know we forced at least one lock document, and all
+ // lock docs are required for a lock to be held.
warning() << "lock forcing " << lockName << " inconsistent" << endl;
} catch (const LockException&) {
// Let the exception go up and don't repackage the exception.
@@ -813,13 +818,14 @@ bool DistributedLock::lock_try(const string& why, bool reenter, BSONObj* other,
verify(canReenter);
// Lock may be re-entered, reset our timer if succeeds or fails
- // Not strictly necessary, but helpful for small timeouts where thread scheduling is significant.
- // This ensures that two attempts are still required for a force if not acquired, and resets our
- // state if we are acquired.
+ // Not strictly necessary, but helpful for small timeouts where thread scheduling is
+ // significant. This ensures that two attempts are still required for a force if not
+ // acquired, and resets our state if we are acquired.
resetLastPing();
- // Test that the lock is held by trying to update the finalized state of the lock to the same state
- // if it does not update or does not update on all servers, we can't re-enter.
+ // Test that the lock is held by trying to update the finalized state of the lock to
+            // the same state; if it does not update, or does not update on all servers, we can't
+ // re-enter.
try {
// Test the lock with the correct "ts" (OID) value
conn->update(LocksType::ConfigNS,
@@ -831,7 +837,8 @@ bool DistributedLock::lock_try(const string& why, bool reenter, BSONObj* other,
BSONObj err = conn->getLastErrorDetailed();
string errMsg = DBClientWithCommands::getLastErrorString(err);
- // TODO: Clean up all the extra code to exit this method, probably with a refactor
+ // TODO: Clean up all the extra code to exit this method, probably with a
+ // refactor
if (!errMsg.empty() || !err["n"].type() || err["n"].numberInt() < 1) {
logErrMsgOrWarn(
"Could not re-enter lock", lockName, errMsg, "(not sure lock is held");
@@ -864,7 +871,8 @@ bool DistributedLock::lock_try(const string& why, bool reenter, BSONObj* other,
LOG(logLvl - 1) << "lock '" << lockName << "' successfully forced" << endl;
- // We don't need the ts value in the query, since we will only ever replace locks with state=0.
+ // We don't need the ts value in the query, since we will only ever replace locks with
+ // state=0.
}
// Case 3: We have an expired lock
else if (o[LocksType::lockID()].type()) {
@@ -872,8 +880,9 @@ bool DistributedLock::lock_try(const string& why, bool reenter, BSONObj* other,
}
}
- // Always reset our ping if we're trying to get a lock, since getting a lock implies the lock state is open
- // and no locks need to be forced. If anything goes wrong, we don't want to remember an old lock.
+ // Always reset our ping if we're trying to get a lock, since getting a lock implies the lock
+ // state is open and no locks need to be forced. If anything goes wrong, we don't want to
+ // remember an old lock.
resetLastPing();
bool gotLock = false;
@@ -930,10 +939,11 @@ bool DistributedLock::lock_try(const string& why, bool reenter, BSONObj* other,
indUpdate = indDB->findOne(LocksType::ConfigNS, BSON(LocksType::name(_name)));
// If we override this lock in any way, grab and protect it.
- // We assume/ensure that if a process does not have all lock documents, it is no longer
- // holding the lock.
- // Note - finalized locks may compete too, but we know they've won already if competing
- // in this round. Cleanup of crashes during finalizing may take a few tries.
+ // We assume/ensure that if a process does not have all lock documents, it is no
+ // longer holding the lock.
+ // Note - finalized locks may compete too, but we know they've won already if
+ // competing in this round. Cleanup of crashes during finalizing may take a few
+ // tries.
if (indUpdate[LocksType::lockID()] < lockDetails[LocksType::lockID()] ||
indUpdate[LocksType::state()].numberInt() == 0) {
BSONObj grabQuery =
@@ -945,8 +955,9 @@ bool DistributedLock::lock_try(const string& why, bool reenter, BSONObj* other,
BSON(LocksType::lockID(lockDetails[LocksType::lockID()].OID())
<< LocksType::state(1));
- // Either our update will succeed, and we'll grab the lock, or it will fail b/c some other
- // process grabbed the lock (which will change the ts), but the lock will be set until forcing
+ // Either our update will succeed, and we'll grab the lock, or it will fail b/c
+ // some other process grabbed the lock (which will change the ts), but the lock
+ // will be set until forcing
indDB->update(LocksType::ConfigNS, grabQuery, BSON("$set" << grabChanges));
indUpdate = indDB->findOne(LocksType::ConfigNS, BSON(LocksType::name(_name)));
@@ -1011,11 +1022,12 @@ bool DistributedLock::lock_try(const string& why, bool reenter, BSONObj* other,
// Complete lock propagation
if (gotLock) {
- // This is now safe, since we know that no new locks will be placed on top of the ones we've checked for at
- // least 15 minutes. Sets the state = 2, so that future clients can determine that the lock is truly set.
- // The invariant for rollbacks is that we will never force locks with state = 2 and active pings, since that
- // indicates the lock is active, but this means the process creating/destroying them must explicitly poll
- // when something goes wrong.
+ // This is now safe, since we know that no new locks will be placed on top of the ones we've
+ // checked for at least 15 minutes. Sets the state = 2, so that future clients can
+ // determine that the lock is truly set. The invariant for rollbacks is that we will never
+ // force locks with state = 2 and active pings, since that indicates the lock is active, but
+ // this means the process creating/destroying them must explicitly poll when something goes
+ // wrong.
try {
BSONObjBuilder finalLockDetails;
BSONObjIterator bi(lockDetails);
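
The numeric states threaded through this file (state(0) when unlocking, state 1 on the initial grab, "state = 2" once propagation finishes) sketch out as a small lifecycle; the enum names are invented for readability:

    enum LockDocState {
        kUnlocked = 0,   // free; also the target state when removing old locks
        kGrabbed = 1,    // one process has written its ts/OID into the document
        kFinalized = 2   // propagation complete; future clients can trust the lock
    };

    // per the rollback invariant above: never force a finalized lock whose holder
    // is still pinging, since that indicates the lock is genuinely active
    bool mayForce(LockDocState s, bool holderStillPinging) {
        return !(s == kFinalized && holderStillPinging);
    }
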
diff --git a/src/mongo/s/distlock.h b/src/mongo/s/distlock.h
index c87b6fda5d0..d57a7355a0d 100644
--- a/src/mongo/s/distlock.h
+++ b/src/mongo/s/distlock.h
@@ -137,12 +137,13 @@ public:
static LastPings lastPings;
/**
- * The constructor does not connect to the configdb yet and constructing does not mean the lock was acquired.
- * Construction does trigger a lock "pinging" mechanism, though.
+ * The constructor does not connect to the configdb yet and constructing does not mean the lock
+ * was acquired. Construction does trigger a lock "pinging" mechanism, though.
*
* @param conn address of config(s) server(s)
* @param name identifier for the lock
- * @param lockTimeout how long can the log go "unpinged" before a new attempt to lock steals it (in minutes).
+     * @param lockTimeout how long the lock can go "unpinged" before a new attempt to lock steals it
+ * (in minutes).
* @param lockPing how long to wait between lock pings
* @param legacy use legacy logic
*
@@ -154,13 +155,14 @@ public:
~DistributedLock(){};
/**
- * Attempts to acquire 'this' lock, checking if it could or should be stolen from the previous holder. Please
- * consider using the dist_lock_try construct to acquire this lock in an exception safe way.
+ * Attempts to acquire 'this' lock, checking if it could or should be stolen from the previous
+ * holder. Please consider using the dist_lock_try construct to acquire this lock in an
+ * exception safe way.
*
* @param why human readable description of why the lock is being taken (used to log)
     * @param reenter whether this is a lock re-entry or a new lock
- * @param other configdb's lock document that is currently holding the lock, if lock is taken, or our own lock
- * details if not
+ * @param other configdb's lock document that is currently holding the lock, if lock is taken,
+ * or our own lock details if not
* @return true if it managed to grab the lock
*/
bool lock_try(const std::string& why,
diff --git a/src/mongo/s/grid.cpp b/src/mongo/s/grid.cpp
index 2982d492773..0b4445a0596 100644
--- a/src/mongo/s/grid.cpp
+++ b/src/mongo/s/grid.cpp
@@ -294,8 +294,8 @@ bool Grid::addShard(string* name,
}
}
- // if the shard is part of a replica set, make sure all the hosts mentioned in 'servers' are part of
- // the set. It is fine if not all members of the set are present in 'servers'.
+ // if the shard is part of a replica set, make sure all the hosts mentioned in 'servers' are
+ // part of the set. It is fine if not all members of the set are present in 'servers'.
bool foundAll = true;
string offendingHost;
if (!commandSetName.empty()) {
@@ -343,9 +343,9 @@ bool Grid::addShard(string* name,
if (name->empty() && !setName.empty())
*name = setName;
- // In order to be accepted as a new shard, that mongod must not have any database name that exists already
- // in any other shards. If that test passes, the new shard's databases are going to be entered as
- // non-sharded db's whose primary is the newly added shard.
+ // In order to be accepted as a new shard, that mongod must not have any database name that
+ // exists already in any other shards. If that test passes, the new shard's databases are
+ // going to be entered as non-sharded db's whose primary is the newly added shard.
BSONObj resListDB;
ok = newShardConn->runCommand("admin", BSON("listDatabases" << 1), resListDB);
@@ -491,8 +491,8 @@ bool Grid::_getNewShardName(string* name) const {
}
/*
- * Returns whether balancing is enabled, with optional namespace "ns" parameter for balancing on a particular
- * collection.
+ * Returns whether balancing is enabled, with optional namespace "ns" parameter for balancing on a
+ * particular collection.
*/
bool Grid::shouldBalance(const SettingsType& balancerSettings) const {
diff --git a/src/mongo/s/grid.h b/src/mongo/s/grid.h
index fa76efd9322..80c70933565 100644
--- a/src/mongo/s/grid.h
+++ b/src/mongo/s/grid.h
@@ -155,7 +155,8 @@ public:
/**
* @param balancerDoc bson that may contain a window of time for the balancer to work
* format { ... , activeWindow: { start: "8:30" , stop: "19:00" } , ... }
- * @return true if there is no window of time specified for the balancer or it we're currently in it
+     * @return true if there is no window of time specified for the balancer or if we're currently
+ * in it
*/
static bool _inBalancingWindow(const BSONObj& balancerDoc, const boost::posix_time::ptime& now);
diff --git a/src/mongo/s/server.cpp b/src/mongo/s/server.cpp
index c042494e620..d7bf0d48b9d 100644
--- a/src/mongo/s/server.cpp
+++ b/src/mongo/s/server.cpp
@@ -365,7 +365,8 @@ static int _main() {
ExitCode exitCode = runMongosServer(mongosGlobalParams.upgrade);
- // To maintain backwards compatibility, we exit with EXIT_NET_ERROR if the listener loop returns.
+ // To maintain backwards compatibility, we exit with EXIT_NET_ERROR if the listener loop
+ // returns.
if (exitCode == EXIT_NET_ERROR) {
dbexit(EXIT_NET_ERROR);
}
diff --git a/src/mongo/s/shard.cpp b/src/mongo/s/shard.cpp
index efffae93ea7..42a14ef5b7d 100644
--- a/src/mongo/s/shard.cpp
+++ b/src/mongo/s/shard.cpp
@@ -110,10 +110,10 @@ public:
scoped_lock lk(_mutex);
- // We use the _lookup table for all shards and for the primary config DB. The config DB info,
- // however, does not come from the ShardNS::shard. So when cleaning the _lookup table we leave
- // the config state intact. The rationale is that this way we could drop shards that
- // were removed without reinitializing the config DB information.
+ // We use the _lookup table for all shards and for the primary config DB. The config DB
+ // info, however, does not come from the ShardNS::shard. So when cleaning the _lookup table
+ // we leave the config state intact. The rationale is that this way we could drop shards
+ // that were removed without reinitializing the config DB information.
ShardMap::iterator i = _lookup.find("config");
if (i != _lookup.end()) {
diff --git a/src/mongo/s/type_changelog.h b/src/mongo/s/type_changelog.h
index 5982792dbd0..26404a3755f 100644
--- a/src/mongo/s/type_changelog.h
+++ b/src/mongo/s/type_changelog.h
@@ -260,8 +260,8 @@ private:
// Convention: (M)andatory, (O)ptional, (S)pecial rule.
std::string _changeID; // (M) id for this change "<hostname>-<current_time>-<increment>"
bool _isChangeIDSet;
- std::string
- _server; // (M) hostname of server that we are making the change on. Does not include port.
+ // (M) hostname of server that we are making the change on. Does not include port.
+ std::string _server;
bool _isServerSet;
std::string _clientAddr; // (M) hostname:port of the client that made this change
bool _isClientAddrSet;
diff --git a/src/mongo/scripting/bson_template_evaluator.h b/src/mongo/scripting/bson_template_evaluator.h
index e68afadf3e7..5b89fbaf808 100644
--- a/src/mongo/scripting/bson_template_evaluator.h
+++ b/src/mongo/scripting/bson_template_evaluator.h
@@ -174,7 +174,8 @@ private:
BSONObjBuilder& out);
/*
- * Operator method to support #RAND_INT_PLUS_THREAD : { key : { #RAND_INT_PLUS_THREAD: [10, 20] } }
+     * Operator method to support #RAND_INT_PLUS_THREAD :
+     * { key : { #RAND_INT_PLUS_THREAD: [10, 20] } }
     * See #RAND_INT above for definition. This variation differs from the base in that
* it uses the upper bound of the requested range to segment the ranges by
* the thread_id of the TemplateEvaluator - thus
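One plausible reading of the segmentation described here, as a hedged standalone sketch (the real evaluator's arithmetic may differ): each thread shifts the base range by threadId times the upper bound, so concurrent evaluators draw from disjoint ranges.

    #include <cstdlib>

    int randIntPlusThread(int min, int max, int threadId) {
        int base = min + std::rand() % (max - min);  // base #RAND_INT in [min, max)
        return base + threadId * max;                // segment the range by thread id
    }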
diff --git a/src/mongo/scripting/bson_template_evaluator_test.cpp b/src/mongo/scripting/bson_template_evaluator_test.cpp
index 07ee50a4f9b..ef3850d27dc 100644
--- a/src/mongo/scripting/bson_template_evaluator_test.cpp
+++ b/src/mongo/scripting/bson_template_evaluator_test.cpp
@@ -681,7 +681,8 @@ TEST(BSONTemplateEvaluatorTest, NESTING) {
ASSERT_LESS_THAN(randValue1, 5);
// Test success when operators are arbitrarily nested within multiple elements
- // {id: { foo: "hi", bar: { #op: [1, 5] }, baz: { baz_a: { #op, [5, 10] }, baz_b: { #op, [10, 15] }, baz_c: "bye" } }
+ // {id: { foo: "hi", bar: { #op: [1, 5] }, baz: { baz_a: { #op, [5, 10] }, baz_b: { #op, [10,
+ // 15] }, baz_c: "bye" } }
BSONObjBuilder builder4;
BSONObj barObj4 = BSON("#RAND_INT" << BSON_ARRAY(1 << 5));
BSONObj bazObj4a = BSON("#RAND_INT" << BSON_ARRAY(5 << 10));
diff --git a/src/mongo/shell/linenoise.cpp b/src/mongo/shell/linenoise.cpp
index a5c2ca05e6a..8bd30c9f744 100644
--- a/src/mongo/shell/linenoise.cpp
+++ b/src/mongo/shell/linenoise.cpp
@@ -889,7 +889,8 @@ void InputBuffer::refreshLine(PromptBase& pi) {
#ifndef _WIN32
/**
- * Read a UTF-8 sequence from the non-Windows keyboard and return the Unicode (UChar32) character it encodes
+ * Read a UTF-8 sequence from the non-Windows keyboard and return the Unicode (UChar32) character it
+ * encodes
*
* @return UChar32 Unicode character
*/
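The decoding step behind this routine can be sketched as follows; this is a simplified standalone version that assumes well-formed input and omits the error, EOF, and escape-sequence handling the real function needs.

    #include <unistd.h>

    typedef unsigned int UChar32;

    UChar32 readUtf8Char() {
        unsigned char c;
        if (read(STDIN_FILENO, &c, 1) != 1)
            return 0;
        // The lead byte tells us how many continuation bytes follow.
        int extra = (c < 0x80) ? 0 : (c < 0xE0) ? 1 : (c < 0xF0) ? 2 : 3;
        UChar32 cp = c & (0xFF >> (extra + 1));  // strip the length-marker bits
        for (int i = 0; i < extra; ++i) {
            unsigned char cont;
            if (read(STDIN_FILENO, &cont, 1) != 1)
                return 0;
            cp = (cp << 6) | (cont & 0x3F);  // append six payload bits per byte
        }
        return cp;
    }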
@@ -1257,16 +1258,21 @@ static UChar32 linenoiseReadChar(void) {
char buf[1024];
sprintf(
buf,
- "Unicode character 0x%04X, repeat count %d, virtual keycode 0x%04X, virtual scancode 0x%04X, key %s%s%s%s%s\n",
+ "Unicode character 0x%04X, repeat count %d, virtual keycode 0x%04X,"
+ "virtual scancode 0x%04X, key %s%s%s%s%s\n",
rec.Event.KeyEvent.uChar.UnicodeChar,
rec.Event.KeyEvent.wRepeatCount,
rec.Event.KeyEvent.wVirtualKeyCode,
rec.Event.KeyEvent.wVirtualScanCode,
rec.Event.KeyEvent.bKeyDown ? "down" : "up",
- (rec.Event.KeyEvent.dwControlKeyState & LEFT_CTRL_PRESSED) ? " L-Ctrl" : "",
- (rec.Event.KeyEvent.dwControlKeyState & RIGHT_CTRL_PRESSED) ? " R-Ctrl" : "",
- (rec.Event.KeyEvent.dwControlKeyState & LEFT_ALT_PRESSED) ? " L-Alt" : "",
- (rec.Event.KeyEvent.dwControlKeyState & RIGHT_ALT_PRESSED) ? " R-Alt" : ""
+ (rec.Event.KeyEvent.dwControlKeyState & LEFT_CTRL_PRESSED) ? " L-Ctrl"
+ : "",
+ (rec.Event.KeyEvent.dwControlKeyState & RIGHT_CTRL_PRESSED) ? " R-Ctrl"
+ : "",
+ (rec.Event.KeyEvent.dwControlKeyState & LEFT_ALT_PRESSED) ? " L-Alt"
+ : "",
+ (rec.Event.KeyEvent.dwControlKeyState & RIGHT_ALT_PRESSED) ? " R-Alt"
+ : ""
);
OutputDebugStringA( buf );
//}
@@ -1276,15 +1282,17 @@ static UChar32 linenoiseReadChar(void) {
if (rec.EventType != KEY_EVENT) {
continue;
}
- // Windows provides for entry of characters that are not on your keyboard by sending the Unicode
- // characters as a "key up" with virtual keycode 0x12 (VK_MENU == Alt key) ... accept these characters,
- // otherwise only process characters on "key down"
+ // Windows provides for entry of characters that are not on your keyboard by sending the
+ // Unicode characters as a "key up" with virtual keycode 0x12 (VK_MENU == Alt key) ...
+ // accept these characters, otherwise only process characters on "key down"
if (!rec.Event.KeyEvent.bKeyDown && rec.Event.KeyEvent.wVirtualKeyCode != VK_MENU) {
continue;
}
modifierKeys = 0;
- // AltGr is encoded as ( LEFT_CTRL_PRESSED | RIGHT_ALT_PRESSED ), so don't treat this combination as either CTRL or META
- // we just turn off those two bits, so it is still possible to combine CTRL and/or META with an AltGr key by using right-Ctrl and/or left-Alt
+ // AltGr is encoded as ( LEFT_CTRL_PRESSED | RIGHT_ALT_PRESSED ), so don't treat this
+ // combination as either CTRL or META; we just turn off those two bits, so it is still
+ // possible to combine CTRL and/or META with an AltGr key by using right-Ctrl and/or
+ // left-Alt
if ((rec.Event.KeyEvent.dwControlKeyState & (LEFT_CTRL_PRESSED | RIGHT_ALT_PRESSED)) ==
(LEFT_CTRL_PRESSED | RIGHT_ALT_PRESSED)) {
rec.Event.KeyEvent.dwControlKeyState &= ~(LEFT_CTRL_PRESSED | RIGHT_ALT_PRESSED);
@@ -1437,16 +1445,16 @@ static const size_t completionCountCutoff = 100;
/**
* Handle command completion, using a completionCallback() routine to provide possible substitutions
- * This routine handles the mechanics of updating the user's input buffer with possible replacement of
- * text as the user selects a proposed completion string, or cancels the completion attempt.
+ * This routine handles the mechanics of updating the user's input buffer with possible replacement
+ * of text as the user selects a proposed completion string, or cancels the completion attempt.
* @param pi PromptBase struct holding information about the prompt and our screen position
*/
int InputBuffer::completeLine(PromptBase& pi) {
linenoiseCompletions lc;
char c = 0;
- // completionCallback() expects a parsable entity, so find the previous break character and extract
- // a copy to parse. we also handle the case where tab is hit while not at end-of-line.
+ // completionCallback() expects a parsable entity, so find the previous break character and
+ // extract a copy to parse. we also handle the case where tab is hit while not at end-of-line.
int startIndex = pos;
while (--startIndex >= 0) {
if (strchr(breakChars, buf32[startIndex])) {
@@ -1704,10 +1712,11 @@ void InputBuffer::clearScreen(PromptBase& pi) {
}
/**
- * Incremental history search -- take over the prompt and keyboard as the user types a search string,
- * deletes characters from it, changes direction, and either accepts the found line (for execution or
- * editing) or cancels.
- * @param pi PromptBase struct holding information about the (old, static) prompt and our screen position
+ * Incremental history search -- take over the prompt and keyboard as the user types a search
+ * string, deletes characters from it, changes direction, and either accepts the found line (for
+ * execution or editing) or cancels.
+ * @param pi PromptBase struct holding information about the (old, static) prompt and our
+ * screen position
* @param startChar the character that began the search, used to set the initial direction
*/
int InputBuffer::incrementalHistorySearch(PromptBase& pi, int startChar) {
@@ -1715,7 +1724,8 @@ int InputBuffer::incrementalHistorySearch(PromptBase& pi, int startChar) {
size_t ucharCount;
int errorCode;
- // if not already recalling, add the current line to the history list so we don't have to special case it
+ // if not already recalling, add the current line to the history list so we don't have to
+ // special case it
if (historyIndex == historyLen - 1) {
free(history[historyLen - 1]);
bufferSize = sizeof(UChar32) * len + 1;
@@ -2271,7 +2281,8 @@ int InputBuffer::getInputLine(PromptBase& pi) {
case DOWN_ARROW_KEY:
case UP_ARROW_KEY:
killRing.lastAction = KillRing::actionOther;
- // if not already recalling, add the current line to the history list so we don't have to special case it
+ // if not already recalling, add the current line to the history list so we don't
+ // have to special case it
if (historyIndex == historyLen - 1) {
free(history[historyLen - 1]);
size_t tempBufferSize = sizeof(UChar32) * len + 1;
@@ -2474,7 +2485,8 @@ int InputBuffer::getInputLine(PromptBase& pi) {
case META + '>': // meta->, end of history
case PAGE_DOWN_KEY: // Page Down, end of history
killRing.lastAction = KillRing::actionOther;
- // if not already recalling, add the current line to the history list so we don't have to special case it
+ // if not already recalling, add the current line to the history list so we don't
+ // have to special case it
if (historyIndex == historyLen - 1) {
free(history[historyLen - 1]);
size_t tempBufferSize = sizeof(UChar32) * len + 1;
@@ -2610,7 +2622,8 @@ void linenoisePreloadBuffer(const char* preloadText) {
* call it with a prompt to display and it will return a line of input from the user
*
* @param prompt text of prompt to display to the user
- * @return the returned string belongs to the caller on return and must be freed to prevent memory leaks
+ * @return the returned string belongs to the caller on return and must be freed to prevent
+ * memory leaks
*/
char* linenoise(const char* prompt) {
if (isatty(STDIN_FILENO)) { // input is from a terminal
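Typical caller-side usage implied by this contract, assuming the returned buffer was allocated with malloc() as in classic linenoise:

    #include <cstdio>
    #include <cstdlib>

    extern char* linenoise(const char* prompt);  // from linenoise.h

    int main() {
        char* line;
        while ((line = linenoise("mongo> ")) != NULL) {
            std::printf("you typed: %s\n", line);
            std::free(line);  // ownership transferred to the caller; free it
        }
        return 0;
    }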
diff --git a/src/mongo/shell/linenoise_utf8.cpp b/src/mongo/shell/linenoise_utf8.cpp
index a286956ad31..b45ce40eb1b 100644
--- a/src/mongo/shell/linenoise_utf8.cpp
+++ b/src/mongo/shell/linenoise_utf8.cpp
@@ -43,8 +43,8 @@ namespace linenoise_utf8 {
* Convert a null terminated UTF-8 string from UTF-8 and store it in a UChar32 destination buffer
* Always null terminates the destination string if at least one character position is available
* Errors in the UTF-8 encoding will be handled in two ways: the erroneous characters will be
- * converted to the Unicode error character U+FFFD and flag bits will be set in the conversionErrorCode
- * int.
+ * converted to the Unicode error character U+FFFD and flag bits will be set in the
+ * conversionErrorCode int.
*
* @param uchar32output Destination UChar32 buffer
* @param utf8input Source UTF-8 string
@@ -212,15 +212,16 @@ void copyString32(UChar32* dest32, const UChar32* source32, size_t destLengthInC
}
/**
- * Convert a specified number of UChar32 characters from a possibly null terminated UChar32 string to UTF-8
- * and store it in a UChar8 destination buffer
+ * Convert a specified number of UChar32 characters from a possibly null terminated UChar32 string
+ * to UTF-8 and store it in a UChar8 destination buffer
* Always null terminates the destination string if at least one character position is available
*
* @param dest8 Destination UChar8 buffer
* @param source32 Source UChar32 string
* @param outputBufferSizeInBytes Destination buffer size in bytes
* @param charCount Maximum number of UChar32 characters to process
- * @return Count of bytes written to output buffer, not including null terminator
+ * @return Count of bytes written to output buffer, not including null
+ * terminator
*/
size_t copyString32to8counted(UChar8* dest8,
const UChar32* source32,
@@ -263,7 +264,8 @@ size_t copyString32to8counted(UChar8* dest8,
* @param dest8 Destination UChar8 buffer
* @param source32 Source UChar32 string
* @param outputBufferSizeInBytes Destination buffer size in bytes
- * @return Count of bytes written to output buffer, not including null terminator
+ * @return Count of bytes written to output buffer, not including null
+ * terminator
*/
size_t copyString32to8(UChar8* dest8, const UChar32* source32, size_t outputBufferSizeInBytes) {
return copyString32to8counted(dest8, source32, outputBufferSizeInBytes, 0x7FFFFFFF);
@@ -303,7 +305,8 @@ int strncmp32(UChar32* first32, UChar32* second32, size_t length) {
}
/**
- * Internally convert an array of UChar32 characters of specified length to UTF-8 and write it to fileHandle
+ * Internally convert an array of UChar32 characters of specified length to UTF-8 and write it to
+ * fileHandle
*
* @param fileHandle File handle to write to
* @param string32 Source UChar32 characters, may not be null terminated
diff --git a/src/mongo/shell/linenoise_utf8.h b/src/mongo/shell/linenoise_utf8.h
index 3826a23180f..667d32d9e8a 100644
--- a/src/mongo/shell/linenoise_utf8.h
+++ b/src/mongo/shell/linenoise_utf8.h
@@ -41,11 +41,11 @@ typedef unsigned int UChar32; // Unicode code point
enum BadUTF8 { BadUTF8_no_error = 0x00, BadUTF8_invalid_byte = 0x01, BadUTF8_surrogate = 0x02 };
/**
- * Convert a null terminated UTF-8 std::string from UTF-8 and store it in a UChar32 destination buffer
- * Always null terminates the destination std::string if at least one character position is available
- * Errors in the UTF-8 encoding will be handled in two ways: the erroneous characters will be
- * converted to the Unicode error character U+FFFD and flag bits will be set in the conversionErrorCode
- * int.
+ * Convert a null terminated UTF-8 std::string from UTF-8 and store it in a UChar32 destination
+ * buffer. Always null terminates the destination std::string if at least one character position is
+ * available. Errors in the UTF-8 encoding will be handled in two ways: the erroneous characters
+ * will be converted to the Unicode error character U+FFFD and flag bits will be set in the
+ * conversionErrorCode int.
*
* @param uchar32output Destination UChar32 buffer
* @param utf8input Source UTF-8 string
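The error policy described above, isolated into a sketch (not the full converter); it uses the BadUTF8 flag values declared earlier in this header:

    typedef unsigned int UChar32;
    enum BadUTF8 { BadUTF8_no_error = 0x00, BadUTF8_invalid_byte = 0x01, BadUTF8_surrogate = 0x02 };

    void recordBadSequence(UChar32* out, int* conversionErrorCode, bool surrogate) {
        *out = 0xFFFD;  // Unicode replacement character
        *conversionErrorCode |= surrogate ? BadUTF8_surrogate : BadUTF8_invalid_byte;
    }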
@@ -61,7 +61,8 @@ void copyString8to32(UChar32* uchar32output,
/**
* Copy a null terminated UChar32 std::string to a UChar32 destination buffer
- * Always null terminates the destination std::string if at least one character position is available
+ * Always null terminates the destination std::string if at least one character position is
+ * available
*
* @param dest32 Destination UChar32 buffer
* @param source32 Source UChar32 string
@@ -70,15 +71,17 @@ void copyString8to32(UChar32* uchar32output,
void copyString32(UChar32* dest32, const UChar32* source32, size_t destLengthInCharacters);
/**
- * Convert a specified number of UChar32 characters from a possibly null terminated UChar32 std::string to UTF-8
- * and store it in a UChar8 destination buffer
- * Always null terminates the destination std::string if at least one character position is available
+ * Convert a specified number of UChar32 characters from a possibly null terminated UChar32
+ * std::string to UTF-8 and store it in a UChar8 destination buffer
+ * Always null terminates the destination std::string if at least one character position is
+ * available
*
* @param dest8 Destination UChar8 buffer
* @param source32 Source UChar32 string
* @param outputBufferSizeInBytes Destination buffer size in bytes
* @param charCount Maximum number of UChar32 characters to process
- * @return Count of bytes written to output buffer, not including null terminator
+ * @return Count of bytes written to output buffer, not including null
+ * terminator
*/
size_t copyString32to8counted(UChar8* dest8,
const UChar32* source32,
@@ -86,13 +89,15 @@ size_t copyString32to8counted(UChar8* dest8,
size_t charCount);
/**
- * Convert a null terminated UChar32 std::string to UTF-8 and store it in a UChar8 destination buffer
- * Always null terminates the destination std::string if at least one character position is available
+ * Convert a null terminated UChar32 std::string to UTF-8 and store it in a UChar8 destination
+ * buffer. Always null terminates the destination std::string if at least one character position is
+ * available.
*
* @param dest8 Destination UChar8 buffer
* @param source32 Source UChar32 string
* @param outputBufferSizeInBytes Destination buffer size in bytes
- * @return Count of bytes written to output buffer, not including null terminator
+ * @return Count of bytes written to output buffer, not including null
+ * terminator
*/
size_t copyString32to8(UChar8* dest8, const UChar32* source32, size_t outputBufferSizeInBytes);
@@ -115,7 +120,8 @@ size_t strlen32(const UChar32* str32);
int strncmp32(UChar32* first32, UChar32* second32, size_t length);
/**
- * Internally convert an array of UChar32 characters of specified length to UTF-8 and write it to fileHandle
+ * Internally convert an array of UChar32 characters of specified length to UTF-8 and write it to
+ * fileHandle
*
* @param fileHandle File handle to write to
* @param string32 Source UChar32 character array, may not be null terminated
diff --git a/src/mongo/shell/shell_utils_launcher.cpp b/src/mongo/shell/shell_utils_launcher.cpp
index 7b488ff5cba..b00a250cbdb 100644
--- a/src/mongo/shell/shell_utils_launcher.cpp
+++ b/src/mongo/shell/shell_utils_launcher.cpp
@@ -760,8 +760,9 @@ int killDb(int port, ProcessId _pid, int signal, const BSONObj& opt) {
} else {
registry.deletePid(pid);
}
- // FIXME I think the intention here is to do an extra sleep only when SIGKILL is sent to the child process.
- // We may want to change the 4 below to 29, since values of i greater than that indicate we sent a SIGKILL.
+ // FIXME I think the intention here is to do an extra sleep only when SIGKILL is sent to the
+ // child process. We may want to change the 4 below to 29, since values of i greater than that
+ // indicate we sent a SIGKILL.
if (i > 4 || signal == SIGKILL) {
sleepmillis(4000); // allow operating system to reclaim resources
}
diff --git a/src/mongo/util/assert_util.h b/src/mongo/util/assert_util.h
index 43117fe3318..4a932db33f9 100644
--- a/src/mongo/util/assert_util.h
+++ b/src/mongo/util/assert_util.h
@@ -87,8 +87,10 @@ struct MONGO_CLIENT_API ExceptionInfo {
int code;
};
-/** helper class that builds error strings. lighter weight than a StringBuilder, albeit less flexible.
- NOINLINE_DECL used in the constructor implementations as we are assuming this is a cold code path when used.
+/** helper class that builds error strings. lighter weight than a StringBuilder, albeit less
+ * flexible.
+ NOINLINE_DECL used in the constructor implementations as we are assuming this is a cold code
+ path when used.
example:
throw UserException(123, ErrorMsg("blah", num_val));
diff --git a/src/mongo/util/background.h b/src/mongo/util/background.h
index f6693c0e318..978fa52ceed 100644
--- a/src/mongo/util/background.h
+++ b/src/mongo/util/background.h
@@ -72,8 +72,8 @@ protected:
* after this returns, deleted if deleteSelf true.
*
* NOTE:
- * if run() throws, the exception will be caught within 'this' object and will ultimately lead to the
- * BackgroundJob's thread being finished, as if run() returned.
+ * if run() throws, the exception will be caught within 'this' object and will ultimately lead
+ * to the BackgroundJob's thread being finished, as if run() returned.
*
*/
virtual void run() = 0;
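The run()/exception contract spelled out in this NOTE, illustrated with a standalone analogue built on std::thread rather than mongo's BackgroundJob itself:

    #include <exception>
    #include <iostream>
    #include <thread>

    struct Job {
        virtual ~Job() {}
        virtual void run() = 0;
        void go() {
            std::thread([this]() {
                try {
                    run();
                } catch (const std::exception& e) {
                    // swallow the exception: the thread finishes as if run() returned
                    std::cerr << "background job failed: " << e.what() << std::endl;
                }
            }).detach();
        }
    };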
diff --git a/src/mongo/util/concurrency/value.h b/src/mongo/util/concurrency/value.h
index 4be9c3d14e5..0189c4ed521 100644
--- a/src/mongo/util/concurrency/value.h
+++ b/src/mongo/util/concurrency/value.h
@@ -74,7 +74,8 @@ public:
set(rhs.get());
}
- // == is not defined. use get() == ... instead. done this way so one thinks about if composing multiple operations
+ // == is not defined. use get() == ... instead. done this way so one thinks about it when
+ // composing multiple operations
bool operator==(const std::string& s) const;
};
}
diff --git a/src/mongo/util/debugger.cpp b/src/mongo/util/debugger.cpp
index d4b722e2a0b..5fed04a0e88 100644
--- a/src/mongo/util/debugger.cpp
+++ b/src/mongo/util/debugger.cpp
@@ -52,7 +52,8 @@ void breakpoint() {
#ifndef _WIN32
// code to raise a breakpoint in GDB
ONCE {
- // prevent SIGTRAP from crashing the program if default action is specified and we are not in gdb
+ // prevent SIGTRAP from crashing the program if default action is specified and we are not
+ // in gdb
struct sigaction current;
sigaction(SIGTRAP, NULL, &current);
if (current.sa_handler == SIG_DFL) {
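Completing the pattern shown in this hunk as a standalone sketch: query the current SIGTRAP disposition, neutralize the default action, then raise the signal so an attached gdb stops while a bare process keeps running.

    #include <signal.h>

    void safeBreakpoint() {
        struct sigaction current;
        sigaction(SIGTRAP, NULL, &current);  // read the current disposition
        if (current.sa_handler == SIG_DFL)
            signal(SIGTRAP, SIG_IGN);        // default action would kill us; ignore instead
        raise(SIGTRAP);                      // gdb (if attached) intercepts this
    }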
diff --git a/src/mongo/util/descriptive_stats-inl.h b/src/mongo/util/descriptive_stats-inl.h
index 1b7c91595d1..d09bc388cf7 100644
--- a/src/mongo/util/descriptive_stats-inl.h
+++ b/src/mongo/util/descriptive_stats-inl.h
@@ -82,9 +82,12 @@ DistributionEstimators<NumQuantiles>::DistributionEstimators()
* The quantile estimation follows the extended_p_square implementation in boost.accumulators.
* It differs by removing the ability to request arbitrary quantiles and computing exactly
* 'NumQuantiles' equidistant quantiles (plus minimum and maximum) instead.
- * See http://www.boost.org/doc/libs/1_51_0/doc/html/boost/accumulators/impl/extended_p_square_impl.html ,
- * R. Jain and I. Chlamtac, The P^2 algorithmus for dynamic calculation of quantiles and histograms without storing observations, Communications of the ACM, Volume 28 (October), Number 10, 1985, p. 1076-1085. and
- * K. E. E. Raatikainen, Simultaneous estimation of several quantiles, Simulation, Volume 49, Number 4 (October), 1986, p. 159-164.
+ * See
+ * http://www.boost.org/doc/libs/1_51_0/doc/html/boost/accumulators/impl/extended_p_square_impl.html
+ * , R. Jain and I. Chlamtac, The P^2 algorithm for dynamic calculation of quantiles and
+ * histograms without storing observations, Communications of the ACM, Volume 28 (October), Number
+ * 10, 1985, p. 1076-1085. and K. E. E. Raatikainen, Simultaneous estimation of several quantiles,
+ * Simulation, Volume 49, Number 4 (October), 1986, p. 159-164.
*/
template <std::size_t NumQuantiles>
DistributionEstimators<NumQuantiles>& DistributionEstimators<NumQuantiles>::operator<<(
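As a reference point for what the streaming estimator approximates, here are the same equidistant quantiles computed exactly by sorting; the estimator above reaches similar values without storing the observations (this baseline is illustrative, not part of the mongo code, and assumes a non-empty sample set).

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    std::vector<double> exactEquidistantQuantiles(std::vector<double> samples,
                                                  std::size_t numQuantiles) {
        std::sort(samples.begin(), samples.end());
        std::vector<double> quantiles;
        for (std::size_t i = 1; i <= numQuantiles; ++i) {
            // index of the i-th of numQuantiles equidistant quantiles
            quantiles.push_back(samples[(samples.size() - 1) * i / (numQuantiles + 1)]);
        }
        return quantiles;
    }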
diff --git a/src/mongo/util/file_allocator.cpp b/src/mongo/util/file_allocator.cpp
index a0d9c3f5da1..ca71a4e6b7c 100644
--- a/src/mongo/util/file_allocator.cpp
+++ b/src/mongo/util/file_allocator.cpp
@@ -317,7 +317,8 @@ void FileAllocator::ensureLength(int fd, long size) {
void FileAllocator::checkFailure() {
if (_failed) {
- // we want to log the problem (diskfull.js expects it) but we do not want to dump a stack tracke
+ // we want to log the problem (diskfull.js expects it) but we do not
+ // want to dump a stack trace
msgassertedNoTrace(12520, "new file allocation failure");
}
}
diff --git a/src/mongo/util/logfile.h b/src/mongo/util/logfile.h
index 2b823582b51..dc316d6cb76 100644
--- a/src/mongo/util/logfile.h
+++ b/src/mongo/util/logfile.h
@@ -52,7 +52,8 @@ public:
*/
void synchronousAppend(const void* buf, size_t len);
- /** write at specified offset. must be aligned. noreturn until physically written. thread safe */
+ /** write at specified offset. must be aligned. noreturn until physically written. thread safe
+  */
void writeAt(unsigned long long offset, const void* _bug, size_t _len);
void readAt(unsigned long long offset, void* _buf, size_t _len);
diff --git a/src/mongo/util/mmap.h b/src/mongo/util/mmap.h
index c99453d7d11..f0ce0d29139 100644
--- a/src/mongo/util/mmap.h
+++ b/src/mongo/util/mmap.h
@@ -124,7 +124,8 @@ public:
template <class F>
static void forEach(F fun);
- /** note: you need to be in mmmutex when using this. forEach (above) handles that for you automatically.
+ /** note: you need to be in mmmutex when using this. forEach (above) handles that for you
+ * automatically.
*/
static std::set<MongoFile*>& getAllFiles();
diff --git a/src/mongo/util/mmap_win.cpp b/src/mongo/util/mmap_win.cpp
index d72f10ab72d..8ceb8401f03 100644
--- a/src/mongo/util/mmap_win.cpp
+++ b/src/mongo/util/mmap_win.cpp
@@ -229,7 +229,8 @@ void* MemoryMappedFile::map(const char* filenameIn, unsigned long long& length,
verify(fd == 0 && len == 0); // can't open more than once
setFilename(filenameIn);
FileAllocator::get()->allocateAsap(filenameIn, length);
- /* big hack here: Babble uses db names with colons. doesn't seem to work on windows. temporary perhaps. */
+ /* big hack here: Babble uses db names with colons. doesn't seem to work on windows. temporary
+ * perhaps. */
char filename[256];
strncpy(filename, filenameIn, 255);
filename[255] = 0;
diff --git a/src/mongo/util/moveablebuffer.h b/src/mongo/util/moveablebuffer.h
index 981197028af..571204ecfe2 100644
--- a/src/mongo/util/moveablebuffer.h
+++ b/src/mongo/util/moveablebuffer.h
@@ -32,7 +32,8 @@
namespace mongo {
-/** this is a sort of smart pointer class where we can move where something is and all the pointers will adjust.
+/** this is a sort of smart pointer class where we can move where something is and all the pointers
+ * will adjust.
not threadsafe.
*/
struct MoveableBuffer {
@@ -46,7 +47,8 @@ struct MoveableBuffer {
/* implementation (inlines) below */
-// this is a temp stub implementation...not really done yet - just having everything compile & such for checkpointing into git
+// this is a temp stub implementation...not really done yet - just having everything compile & such
+// for checkpointing into git
inline MoveableBuffer::MoveableBuffer() : p(0) {}
diff --git a/src/mongo/util/net/listen.h b/src/mongo/util/net/listen.h
index 856658137ab..07370deeac0 100644
--- a/src/mongo/util/net/listen.h
+++ b/src/mongo/util/net/listen.h
@@ -96,7 +96,8 @@ public:
if (_timeTracker)
return _timeTracker->getMyElapsedTimeMillis();
- // should this assert or throw? seems like callers may not expect to get zero back, certainly not forever.
+ // should this assert or throw? seems like callers may not expect to get zero back,
+ // certainly not forever.
return 0;
}
diff --git a/src/mongo/util/net/message.h b/src/mongo/util/net/message.h
index d5ef5eae8d2..b1adccc1f80 100644
--- a/src/mongo/util/net/message.h
+++ b/src/mongo/util/net/message.h
@@ -330,7 +330,8 @@ inline int ConstView::dataLen() const {
class Message {
public:
- // we assume here that a vector with initial size 0 does no allocation (0 is the default, but wanted to make it explicit).
+ // we assume here that a vector with initial size 0 does no allocation (0 is the default, but
+ // wanted to make it explicit).
Message() : _buf(0), _data(0), _freeIt(false) {}
Message(void* data, bool freeIt) : _buf(0), _data(0), _freeIt(false) {
_setData(reinterpret_cast<char*>(data), freeIt);
@@ -486,7 +487,8 @@ private:
}
// if just one buffer, keep it in _buf, otherwise keep a sequence of buffers in _data
char* _buf;
- // byte buffer(s) - the first must contain at least a full MsgData unless using _buf for storage instead
+ // byte buffer(s) - the first must contain at least a full MsgData unless using _buf for storage
+ // instead
typedef std::vector<std::pair<char*, int>> MsgVec;
MsgVec _data;
bool _freeIt;
diff --git a/src/mongo/util/net/message_port.h b/src/mongo/util/net/message_port.h
index 27637acb5ac..f1e37328368 100644
--- a/src/mongo/util/net/message_port.h
+++ b/src/mongo/util/net/message_port.h
@@ -48,8 +48,8 @@ public:
virtual void reply(
Message& received,
Message& response,
- MSGID
- responseTo) = 0; // like the reply below, but doesn't rely on received.data still being available
+ // like the reply below, but doesn't rely on received.data still being available
+ MSGID responseTo) = 0;
virtual void reply(Message& received, Message& response) = 0;
virtual HostAndPort remote() const = 0;
diff --git a/src/mongo/util/net/miniwebserver.h b/src/mongo/util/net/miniwebserver.h
index 8c3cfa72833..36320c342ae 100644
--- a/src/mongo/util/net/miniwebserver.h
+++ b/src/mongo/util/net/miniwebserver.h
@@ -45,15 +45,14 @@ public:
MiniWebServer(const std::string& name, const std::string& ip, int _port);
virtual ~MiniWebServer() {}
- virtual void doRequest(
- const char* rq, // the full request
- std::string url,
- // set these and return them:
- std::string& responseMsg,
- int& responseCode,
- std::vector<std::string>&
- headers, // if completely empty, content-type: text/html will be added
- const SockAddr& from) = 0;
+ virtual void doRequest(const char* rq, // the full request
+ std::string url,
+ // set these and return them:
+ std::string& responseMsg,
+ int& responseCode,
+ std::vector<std::string>& headers, // if completely empty, content-type:
+ // text/html will be added
+ const SockAddr& from) = 0;
// --- static helpers ----
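A hedged sketch of a subclass wired to the virtual shown above (it compiles only against mongo's miniwebserver.h; the port and response body here are illustrative):

    class HelloServer : public MiniWebServer {
    public:
        HelloServer() : MiniWebServer("hello", "0.0.0.0", 28017) {}
        virtual void doRequest(const char* rq,  // the full request
                               std::string url,
                               std::string& responseMsg,
                               int& responseCode,
                               std::vector<std::string>& headers,  // left empty: text/html is added
                               const SockAddr& from) {
            responseCode = 200;
            responseMsg = "<h1>hello from " + url + "</h1>";
        }
    };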
diff --git a/src/mongo/util/ntservice.cpp b/src/mongo/util/ntservice.cpp
index 6abe04a82d7..751f9870fe5 100644
--- a/src/mongo/util/ntservice.cpp
+++ b/src/mongo/util/ntservice.cpp
@@ -292,7 +292,8 @@ void installServiceOrDie(const wstring& serviceName,
while (true) {
// Make sure service doesn't already exist.
- // TODO: Check to see if service is in "Deleting" status, suggest the user close down Services MMC snap-ins.
+ // TODO: Check to see if service is in "Deleting" status, suggest the user close down
+ // Services MMC snap-ins.
schService = ::OpenService(schSCManager, serviceName.c_str(), SERVICE_ALL_ACCESS);
if (schService != NULL) {
log() << "There is already a service named '" << toUtf8String(serviceName)
@@ -381,13 +382,13 @@ void installServiceOrDie(const wstring& serviceName,
if (!serviceInstalled) {
#else
// This code sets the mongod service to auto-restart, forever.
- // This might be a fine thing to do except that when mongod or Windows has a crash, the mongo.lock
- // file is still around, so any attempt at a restart will immediately fail. With auto-restart, we
- // go into a loop, crashing and restarting, crashing and restarting, until someone comes in and
- // disables the service or deletes the mongod.lock file.
+ // This might be a fine thing to do except that when mongod or Windows has a crash, the
+ // mongo.lock file is still around, so any attempt at a restart will immediately fail. With
+ // auto-restart, we go into a loop, crashing and restarting, crashing and restarting, until
+ // someone comes in and disables the service or deletes the mongod.lock file.
//
- // I'm leaving the old code here for now in case we solve this and are able to turn SC_ACTION_RESTART
- // back on.
+ // I'm leaving the old code here for now in case we solve this and are able to turn
+ // SC_ACTION_RESTART back on.
//
if (serviceInstalled) {
SC_ACTION aActions[3] = {
diff --git a/src/mongo/util/options_parser/option_description.h b/src/mongo/util/options_parser/option_description.h
index 1dce8ea2980..4c50f0abbff 100644
--- a/src/mongo/util/options_parser/option_description.h
+++ b/src/mongo/util/options_parser/option_description.h
@@ -174,8 +174,9 @@ public:
OptionDescription& validRange(long min, long max);
/**
- * Specifies that this option is incompatible with another option. The std::string provided must
- * be the dottedName, which is the name used to access the option in the result Environment.
+ * Specifies that this option is incompatible with another option. The std::string provided
+ * must be the dottedName, which is the name used to access the option in the result
+ * Environment.
*
* TODO: Find a way to check that that option actually exists in our section somewhere.
*/
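Hypothetical usage of this interface; validRange appears in the hunk above, while addOptionChaining and incompatibleWith are assumed names for the registration call and the incompatibility rule this comment documents:

    options.addOptionChaining("net.port", "port", moe::Int, "listen port")
        .validRange(1, 65535)                                   // documented above
        .incompatibleWith("net.unixDomainSocket.pathPrefix");   // dottedName of the other option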
diff --git a/src/mongo/util/options_parser/options_parser.h b/src/mongo/util/options_parser/options_parser.h
index 9c3752f1660..f247fb6dfb0 100644
--- a/src/mongo/util/options_parser/options_parser.h
+++ b/src/mongo/util/options_parser/options_parser.h
@@ -108,7 +108,8 @@ private:
const std::vector<std::string>& argv,
Environment*);
- /** Handles parsing of an INI config std::string and adds the results to the given Environment */
+ /** Handles parsing of an INI config std::string and adds the results to the given Environment
+  */
Status parseINIConfigFile(const OptionSection&, const std::string& config, Environment*);
/** Gets defaults from the OptionSection and adds them to the given Environment */
diff --git a/src/mongo/util/processinfo_linux2.cpp b/src/mongo/util/processinfo_linux2.cpp
index 91a39cbe29a..d1ed23a4861 100644
--- a/src/mongo/util/processinfo_linux2.cpp
+++ b/src/mongo/util/processinfo_linux2.cpp
@@ -138,11 +138,13 @@ public:
// The process ID.
char _comm[128];
- // The filename of the executable, in parentheses. This is visible whether or not the executable is swapped out.
+ // The filename of the executable, in parentheses. This is visible whether or not the
+ // executable is swapped out.
char _state;
- // One character from the string "RSDZTW" where R is running, S is sleeping in an interruptible wait, D is waiting in uninterruptible
- // disk sleep, Z is zombie, T is traced or stopped (on a signal), and W is paging.
+ // One character from the string "RSDZTW" where R is running, S is sleeping in an interruptible
+ // wait, D is waiting in uninterruptible disk sleep, Z is zombie, T is traced or stopped (on a
+ // signal), and W is paging.
int _ppid;
// The PID of the parent.
@@ -157,19 +159,23 @@ public:
// The tty the process uses.
int _tpgid;
- // The process group ID of the process which currently owns the tty that the process is connected to.
+ // The process group ID of the process which currently owns the tty that the process is
+ // connected to.
unsigned long _flags; // %lu
- // The kernel flags word of the process. For bit meanings, see the PF_* defines in <linux/sched.h>. Details depend on the kernel version.
+ // The kernel flags word of the process. For bit meanings, see the PF_* defines in
+ // <linux/sched.h>. Details depend on the kernel version.
unsigned long _min_flt; // %lu
- // The number of minor faults the process has made which have not required loading a memory page from disk.
+ // The number of minor faults the process has made which have not required loading a memory page
+ // from disk.
unsigned long _cmin_flt; // %lu
// The number of minor faults that the process
unsigned long _maj_flt; // %lu
- // The number of major faults the process has made which have required loading a memory page from disk.
+ // The number of major faults the process has made which have required loading a memory page
+ // from disk.
unsigned long _cmaj_flt; // %lu
// The number of major faults that the process
@@ -192,7 +198,8 @@ public:
// number of threads
unsigned long _alarm;
- // The time in jiffies before the next SIGALRM is sent to the process due to an interval timer. (unused since 2.6.17)
+ // The time in jiffies before the next SIGALRM is sent to the process due to an interval timer.
+ // (unused since 2.6.17)
unsigned long _start_time; // %lu
// The time in jiffies the process started after system boot.
@@ -201,8 +208,9 @@ public:
// Virtual memory size in bytes.
long _rss; // %ld
- // Resident Set Size: number of pages the process has in real memory, minus 3 for administrative purposes. This is just the pages which
- // count towards text, data, or stack space. This does not include pages which have not been demand-loaded in, or which are swapped out
+ // Resident Set Size: number of pages the process has in real memory, minus 3 for administrative
+ // purposes. This is just the pages which count towards text, data, or stack space. This
+ // does not include pages which have not been demand-loaded in, or which are swapped out
unsigned long _rss_rlim; // %lu
// Current limit in bytes on the rss of the process (usually 4294967295 on i386).
@@ -310,7 +318,8 @@ public:
}
// try known flat-text file locations
- // format: Slackware-x86_64 13.0, Red Hat Enterprise Linux Server release 5.6 (Tikanga), etc.
+ // format: Slackware-x86_64 13.0, Red Hat Enterprise Linux Server release 5.6 (Tikanga),
+ // etc.
typedef vector<string> pathvec;
pathvec paths;
pathvec::const_iterator i;
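Reading the first few fields documented above straight from /proc/self/stat, as a standalone sketch (note the comm field is parenthesized and may contain spaces):

    #include <cstdio>

    int main() {
        std::FILE* f = std::fopen("/proc/self/stat", "r");
        if (!f)
            return 1;
        int pid = 0, ppid = 0;
        char comm[128] = {0};
        char state = '?';
        // layout: pid (comm) state ppid ...; scan comm up to the closing parenthesis
        std::fscanf(f, "%d (%127[^)]) %c %d", &pid, comm, &state, &ppid);
        std::fclose(f);
        std::printf("pid=%d comm=%s state=%c ppid=%d\n", pid, comm, state, ppid);
        return 0;
    }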
diff --git a/src/mongo/util/processinfo_win32.cpp b/src/mongo/util/processinfo_win32.cpp
index e0d84214bc7..34391de46d0 100644
--- a/src/mongo/util/processinfo_win32.cpp
+++ b/src/mongo/util/processinfo_win32.cpp
@@ -438,8 +438,9 @@ bool ProcessInfo::blockInMemory(const void* start) {
if (bstat) {
for (int i=0; i<30; i++) {
if (wiex[i].BasicInfo.FaultingPc == 0) break;
- cout << "faulting pc = " << wiex[i].BasicInfo.FaultingPc << " address = " << wiex[i].BasicInfo.FaultingVa << " thread id = " << wiex[i].FaultingThreadId << endl;
- }
+ cout << "faulting pc = " << wiex[i].BasicInfo.FaultingPc << "address = "
+ << wiex[i].BasicInfo.FaultingVa << " thread id = "
+ << wiex[i].FaultingThreadId << endl; }
}
#endif
PSAPI_WORKING_SET_EX_INFORMATION wsinfo;
diff --git a/src/mongo/util/progress_meter.h b/src/mongo/util/progress_meter.h
index 3dc116886f7..fbf9887311b 100644
--- a/src/mongo/util/progress_meter.h
+++ b/src/mongo/util/progress_meter.h
@@ -126,8 +126,8 @@ private:
// e.g.:
// CurOp * op = txn.getCurOp();
-// ProgressMeterHolder pm(op->setMessage("index: (1/3) external sort", "Index: External Sort Progress", d->stats.nrecords, 10));
-// loop { pm.hit(); }
+// ProgressMeterHolder pm(op->setMessage("index: (1/3) external sort", "Index: External Sort
+// Progress", d->stats.nrecords, 10));
+// loop { pm.hit(); }
class ProgressMeterHolder : boost::noncopyable {
public:
ProgressMeterHolder(ProgressMeter& pm) : _pm(pm) {}
diff --git a/src/mongo/util/ptr.h b/src/mongo/util/ptr.h
index 5ad5d94cb0e..98213ec8689 100644
--- a/src/mongo/util/ptr.h
+++ b/src/mongo/util/ptr.h
@@ -85,7 +85,8 @@ struct ptr {
_p = p.get();
return *this;
}
- // template<typename U> ptr& operator= (const std::auto_ptr<U>& p) { _p = p.get(); return *this; }
+ // template<typename U> ptr& operator= (const std::auto_ptr<U>& p) { _p = p.get(); return *this;
+ // }
// use
T* operator->() const {
diff --git a/src/mongo/util/queue.h b/src/mongo/util/queue.h
index 2161b537a4c..0d1066d0ead 100644
--- a/src/mongo/util/queue.h
+++ b/src/mongo/util/queue.h
@@ -49,7 +49,8 @@ size_t _getSizeDefault(const T& t) {
* A custom sizing function can optionally be given. By default the getSize function
* returns 1 for each item, resulting in size equaling the number of items queued.
*
- * Note that use of this class is deprecated. This class only works with a single consumer and * a single producer.
+ * Note that use of this class is deprecated. This class only works with a single consumer and
+ * a single producer.
*/
template <typename T>
class BlockingQueue : boost::noncopyable {
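The single-producer/single-consumer contract noted above, illustrated with a generic standalone queue built on standard primitives rather than the deprecated class itself:

    #include <condition_variable>
    #include <mutex>
    #include <queue>

    template <typename T>
    class SpscQueue {
    public:
        void push(T t) {
            std::lock_guard<std::mutex> lk(_m);
            _q.push(t);
            _cv.notify_one();  // at most one consumer can be waiting
        }
        T blockingPop() {
            std::unique_lock<std::mutex> lk(_m);
            while (_q.empty())
                _cv.wait(lk);  // sleep until the producer pushes
            T t = _q.front();
            _q.pop();
            return t;
        }

    private:
        std::mutex _m;
        std::condition_variable _cv;
        std::queue<T> _q;
    };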
diff --git a/src/mongo/util/safe_num.cpp b/src/mongo/util/safe_num.cpp
index 1350fca1c78..05eabe803e0 100644
--- a/src/mongo/util/safe_num.cpp
+++ b/src/mongo/util/safe_num.cpp
@@ -172,7 +172,9 @@ SafeNum addInt32Int32(int lInt32, int rInt32) {
// NOTE: Please see "Secure Coding in C and C++", Second Edition, page 264-265 for
// details on this algorithm (for an alternative resources, see
//
- // https://www.securecoding.cert.org/confluence/display/seccode/INT32-C.+Ensure+that+operations+on+signed+integers+do+not+result+in+overflow?showComments=false).
+ // https://www.securecoding.cert.org/confluence/display/seccode/
+ // INT32-C.+Ensure+that+operations+on+signed+integers+do+not+result+in+overflow
+ // ?showComments=false).
//
// We are using the "Downcast from a larger type" algorithm here. We always perform
// the arithmetic in 64-bit mode, which can never overflow for 32-bit
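The "downcast from a larger type" algorithm referenced here, as a self-contained sketch: do the arithmetic in 64 bits, then keep the 32-bit result only if it round-trips.

    #include <cstdint>
    #include <limits>

    // Returns true and stores the sum when it fits in 32 bits; false signals
    // that the caller should promote the result to a wider type.
    bool addInt32Checked(int32_t l, int32_t r, int32_t* out) {
        int64_t sum = static_cast<int64_t>(l) + static_cast<int64_t>(r);  // cannot overflow in 64 bits
        if (sum < std::numeric_limits<int32_t>::min() || sum > std::numeric_limits<int32_t>::max())
            return false;
        *out = static_cast<int32_t>(sum);
        return true;
    }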
@@ -214,7 +216,9 @@ SafeNum mulInt32Int32(int lInt32, int rInt32) {
// NOTE: Please see "Secure Coding in C and C++", Second Edition, page 264-265 for
// details on this algorithm (for an alternative resources, see
//
- // https://www.securecoding.cert.org/confluence/display/seccode/INT32-C.+Ensure+that+operations+on+signed+integers+do+not+result+in+overflow?showComments=false).
+ // https://www.securecoding.cert.org/confluence/display/seccode/
+ // INT32-C.+Ensure+that+operations+on+signed+integers+do+not+result+in+overflow
+ // ?showComments=false).
//
// We are using the "Downcast from a larger type" algorithm here. We always perform
// the arithmetic in 64-bit mode, which can never overflow for 32-bit
diff --git a/src/mongo/util/startup_test.h b/src/mongo/util/startup_test.h
index fd1b220e1d9..ed87d78ed38 100644
--- a/src/mongo/util/startup_test.h
+++ b/src/mongo/util/startup_test.h
@@ -33,15 +33,16 @@
namespace mongo {
-/* The idea here is to let all initialization of global variables (classes inheriting from StartupTest)
+/* The idea here is to let all initialization of global variables (classes inheriting from
+ * StartupTest)
complete before we run the tests -- otherwise order of initialization being arbitrary may mess
us up. The app's main() function should call runTests().
- To define a unit test, inherit from this and implement run. instantiate one object for the new class
- as a global.
+ To define a unit test, inherit from this and implement run. instantiate one object for the new
+ class as a global.
- These tests are ran on *every* startup of mongod, so they have to be very lightweight. But it is a
- good quick check for a bad build.
+ These tests are run on *every* startup of mongod, so they have to be very lightweight. But it is
+ a good quick check for a bad build.
*/
class StartupTest {
public:
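The registration pattern this comment describes, as a hedged sketch (StartupTest's exact members beyond run() are assumed from context, and the check itself is illustrative):

    class EndiannessSelfTest : public StartupTest {
        virtual void run() {
            unsigned one = 1;
            // lightweight sanity check: this build assumes little-endian layout
            verify(*reinterpret_cast<unsigned char*>(&one) == 1);
        }
    } endiannessSelfTest;  // one global instance registers the test at startup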
diff --git a/src/mongo/util/winutil.h b/src/mongo/util/winutil.h
index 57e93730867..5d4b727b111 100644
--- a/src/mongo/util/winutil.h
+++ b/src/mongo/util/winutil.h
@@ -48,7 +48,8 @@ inline std::string GetWinErrMsg(DWORD err) {
NULL);
std::string errMsgStr = toUtf8String(errMsg);
::LocalFree(errMsg);
- // FormatMessage() appends a newline to the end of error messages, we trim it because std::endl flushes the buffer.
+ // FormatMessage() appends a newline to the end of error messages, we trim it because std::endl
+ // flushes the buffer.
errMsgStr = errMsgStr.erase(errMsgStr.length() - 2);
std::ostringstream output;
output << errMsgStr << " (" << err << ")";