 s/balance.cpp            |  2 +-
 s/balancer_policy.cpp    |  2 +-
 s/chunk.cpp              |  6 +++---
 s/client.cpp             |  4 ++--
 s/commands_admin.cpp     | 16 ++++++++--------
 s/d_chunk_manager.cpp    |  8 ++++----
 s/d_migrate.cpp          |  8 ++++----
 s/d_split.cpp            | 10 +++++-----
 s/d_state.cpp            |  8 ++++----
 s/d_writeback.cpp        |  2 +-
 s/grid.cpp               |  2 +-
 s/shard_version.cpp      |  2 +-
 s/strategy_shard.cpp     |  2 +-
 s/writeback_listener.cpp |  2 +-
 14 files changed, 37 insertions(+), 37 deletions(-)
diff --git a/s/balance.cpp b/s/balance.cpp
index fba325ed5ae..da25f3362c2 100644
--- a/s/balance.cpp
+++ b/s/balance.cpp
@@ -73,7 +73,7 @@ namespace mongo {
}
// the move requires acquiring the collection metadata's lock, which can fail
- log() << "balacer move failed: " << res << " from: " << chunkInfo.from << " to: " << chunkInfo.to
+ log() << "balancer move failed: " << res << " from: " << chunkInfo.from << " to: " << chunkInfo.to
<< " chunk: " << chunkToMove << endl;
if ( res["chunkTooBig"].trueValue() ) {
diff --git a/s/balancer_policy.cpp b/s/balancer_policy.cpp
index 1405d65f02d..1456b70c4d8 100644
--- a/s/balancer_policy.cpp
+++ b/s/balancer_policy.cpp
@@ -126,7 +126,7 @@ namespace mongo {
}
BSONObj BalancerPolicy::pickChunk( const vector<BSONObj>& from, const vector<BSONObj>& to ) {
- // It is possible for a donor ('from') shard to have less chunks than a recevier one ('to')
+ // It is possible for a donor ('from') shard to have less chunks than a receiver one ('to')
// if the donor is in draining mode.
if ( to.size() == 0 )
diff --git a/s/chunk.cpp b/s/chunk.cpp
index 945b52d2570..f8e8841c9ab 100644
--- a/s/chunk.cpp
+++ b/s/chunk.cpp
@@ -280,7 +280,7 @@ namespace mongo {
warning() << "splitChunk failed - cmd: " << cmdObj << " result: " << res << endl;
conn.done();
- // reloading won't stricly solve all problems, e.g. the collection's metdata lock can be taken
+ // reloading won't strictly solve all problems, e.g. the collection's metadata lock can be taken
// but we issue here so that mongos may refresh without needing to be written/read against
_manager->reload();
@@ -343,7 +343,7 @@ namespace mongo {
return false;
// this is a bit ugly
- // we need it so that mongos blocks for the writes to actually be commited
+ // we need it so that mongos blocks for the writes to actually be committed
// this does mean mongos has more back pressure than mongod alone
// since it nots 100% tcp queue bound
// this was implicit before since we did a splitVector on the same socket
@@ -364,7 +364,7 @@ namespace mongo {
log() << "autosplitted " << _manager->getns() << " shard: " << toString()
<< " on: " << splitPoint << "(splitThreshold " << splitThreshold << ")"
#ifdef _DEBUG
- << " size: " << getPhysicalSize() // slow - but can be usefule when debugging
+ << " size: " << getPhysicalSize() // slow - but can be useful when debugging
#endif
<< endl;
diff --git a/s/client.cpp b/s/client.cpp
index e94fccaec1a..5443df5de28 100644
--- a/s/client.cpp
+++ b/s/client.cpp
@@ -96,7 +96,7 @@ namespace mongo {
BSONElement cid = gle["connectionId"];
if ( cid.eoo() ) {
- error() << "getLastError writeback can't work because of version mis-match" << endl;
+ error() << "getLastError writeback can't work because of version mismatch" << endl;
return;
}
@@ -114,7 +114,7 @@ namespace mongo {
return res;
if ( fromWriteBackListener ) {
- LOG(1) << "not doing recusrive writeback" << endl;
+ LOG(1) << "not doing recursive writeback" << endl;
return res;
}
diff --git a/s/commands_admin.cpp b/s/commands_admin.cpp
index 4031d4a01ef..1729071e677 100644
--- a/s/commands_admin.cpp
+++ b/s/commands_admin.cpp
@@ -254,7 +254,7 @@ namespace mongo {
Shard s = Shard::make( to );
if ( config->getPrimary() == s.getConnString() ) {
- errmsg = "thats already the primary";
+ errmsg = "is is already the primary";
return false;
}
@@ -408,7 +408,7 @@ namespace mongo {
// Sharding interacts with indexing in at least two ways:
//
- // 1. A unique index must have the sharding key as its prefix. Otherwise maintainig uniqueness would
+ // 1. A unique index must have the sharding key as its prefix. Otherwise maintaining uniqueness would
// require coordinated access to all shards. Trying to shard a collection with such an index is not
// allowed.
//
@@ -519,7 +519,7 @@ namespace mongo {
bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
string ns = cmdObj.firstElement().valuestrsafe();
if ( ns.size() == 0 ) {
- errmsg = "need to speciy fully namespace";
+ errmsg = "need to specify fully namespace";
return false;
}
@@ -550,7 +550,7 @@ namespace mongo {
<< " { split : 'alleyinsider.blog.posts' , find : { ts : 1 } }\n"
<< " example: - split the shard that contains the key with this as the middle \n"
<< " { split : 'alleyinsider.blog.posts' , middle : { ts : 1 } }\n"
- << " NOTE: this does not move move the chunks, it merely creates a logical seperation \n"
+ << " NOTE: this does not move move the chunks, it merely creates a logical separation \n"
;
}
@@ -645,7 +645,7 @@ namespace mongo {
if ( ! config->isSharded( ns ) ) {
config->reload();
if ( ! config->isSharded( ns ) ) {
- errmsg = "ns not sharded. have to shard before can move a chunk";
+ errmsg = "ns not sharded. have to shard before we can move a chunk";
return false;
}
}
@@ -688,7 +688,7 @@ namespace mongo {
return false;
}
- // pre-emptively reload the config to get new version info
+ // preemptively reload the config to get new version info
config->getChunkManager( ns , true );
result.append( "millis" , t.millis() );
@@ -738,7 +738,7 @@ namespace mongo {
return false;
}
- // using localhost in server names implies every other process must use locahost addresses too
+ // using localhost in server names implies every other process must use localhost addresses too
vector<HostAndPort> serverAddrs = servers.getServers();
for ( size_t i = 0 ; i < serverAddrs.size() ; i++ ) {
if ( serverAddrs[i].isLocalHost() != grid.allowLocalHost() ) {
@@ -1054,7 +1054,7 @@ namespace mongo {
if ( name == "local" ) {
// we don't return local
- // since all shards have their own independant local
+ // since all shards have their own independent local
continue;
}
diff --git a/s/d_chunk_manager.cpp b/s/d_chunk_manager.cpp
index 43348405753..b77280129d5 100644
--- a/s/d_chunk_manager.cpp
+++ b/s/d_chunk_manager.cpp
@@ -29,7 +29,7 @@ namespace mongo {
ShardChunkManager::ShardChunkManager( const string& configServer , const string& ns , const string& shardName ) {
// have to get a connection to the config db
- // special case if i'm the configdb since i'm locked and if i connect to myself
+ // special case if I'm the configdb since I'm locked and if I connect to myself
// its a deadlock
scoped_ptr<ScopedDbConnection> scoped;
scoped_ptr<DBDirectClient> direct;
@@ -112,7 +112,7 @@ namespace mongo {
BSONObj currMax = it->second;
++it;
- // coallesce the chunk's bounds in ranges if they are adjacent chunks
+ // coalesce the chunk's bounds in ranges if they are adjacent chunks
if ( min.isEmpty() ) {
min = currMin;
max = currMax;
@@ -206,7 +206,7 @@ namespace mongo {
ShardChunkManager* ShardChunkManager::cloneMinus( const BSONObj& min, const BSONObj& max, const ShardChunkVersion& version ) {
- // check that we have the exact chunk that'll be subtracted
+ // check that we have the exact chunk that will be subtracted
_assertChunkExists( min , max );
auto_ptr<ShardChunkManager> p( new ShardChunkManager );
@@ -285,7 +285,7 @@ namespace mongo {
uasserted( 14039 , str::stream() << "version " << version.toString() << " not greater than " << _version.toString() );
}
- // check that we have the exact chunk that'll be split and that the split point is valid
+ // check that we have the exact chunk that will be split and that the split point is valid
_assertChunkExists( min , max );
for ( vector<BSONObj>::const_iterator it = splitKeys.begin() ; it != splitKeys.end() ; ++it ) {
if ( ! contains( min , max , *it ) ) {
diff --git a/s/d_migrate.cpp b/s/d_migrate.cpp
index 9e44811bf00..b5a18b67b65 100644
--- a/s/d_migrate.cpp
+++ b/s/d_migrate.cpp
@@ -133,7 +133,7 @@ namespace mongo {
};
struct OldDataCleanup {
- static AtomicUInt _numThreads; // how many threads are doing async cleanusp
+ static AtomicUInt _numThreads; // how many threads are doing async cleanup
string ns;
BSONObj min;
@@ -439,7 +439,7 @@ namespace mongo {
// use the average object size to estimate how many objects a full chunk would carry
// do that while traversing the chunk's range using the sharding index, below
- // there's a fair amout of slack before we determine a chunk is too large because object sizes will vary
+ // there's a fair amount of slack before we determine a chunk is too large because object sizes will vary
unsigned long long maxRecsWhenFull;
long long avgRecSize;
const long long totalRecs = d->stats.nrecords;
@@ -479,7 +479,7 @@ namespace mongo {
}
if ( isLargeChunk ) {
- warning() << "can't move chunk of size (aprox) " << recCount * avgRecSize
+ warning() << "can't move chunk of size (approximately) " << recCount * avgRecSize
<< " because maximum size allowed to move is " << maxChunkSize
<< " ns: " << _ns << " " << _min << " -> " << _max
<< migrateLog;
@@ -856,7 +856,7 @@ namespace mongo {
log(0) << "moveChunk data transfer progress: " << res << " my mem used: " << migrateFromStatus.mbUsed() << migrateLog;
if ( ! ok || res["state"].String() == "fail" ) {
- warning() << "moveChunk error transfering data caused migration abort: " << res << migrateLog;
+ warning() << "moveChunk error transferring data caused migration abort: " << res << migrateLog;
errmsg = "data transfer error";
result.append( "cause" , res );
return false;
diff --git a/s/d_split.cpp b/s/d_split.cpp
index 1cf5dcb0c7c..d68238dc995 100644
--- a/s/d_split.cpp
+++ b/s/d_split.cpp
@@ -74,7 +74,7 @@ namespace mongo {
NamespaceDetails *d = nsdetails(ns);
int idxNo = d->idxNo(*id);
- // only yielding on firt half for now
+ // only yielding on first half for now
// after this it should be in ram, so 2nd should be fast
{
shared_ptr<Cursor> c( BtreeCursor::make( d, idxNo, *id, min, max, false, 1 ) );
@@ -423,7 +423,7 @@ namespace mongo {
// we were near and and got pushed to the end
// i think returning the splits we've already found is fine
- // don't use the btree cursor pointer to acces keys beyond this point but ok
+ // don't use the btree cursor pointer to access keys beyond this point but ok
// to use it for format the keys we've got already
cc.release();
break;
@@ -544,13 +544,13 @@ namespace mongo {
const BSONObj min = cmdObj["min"].Obj();
if ( min.isEmpty() ) {
- errmsg = "neet to specify the min key for the chunk";
+ errmsg = "need to specify the min key for the chunk";
return false;
}
const BSONObj max = cmdObj["max"].Obj();
if ( max.isEmpty() ) {
- errmsg = "neet to specify the max key for the chunk";
+ errmsg = "need to specify the max key for the chunk";
return false;
}
@@ -709,7 +709,7 @@ namespace mongo {
op.appendBool( "b" , true );
op.append( "ns" , ShardNS::chunk );
- // add the modified (new) chunk infomation as the update object
+ // add the modified (new) chunk information as the update object
BSONObjBuilder n( op.subobjStart( "o" ) );
n.append( "_id" , Chunk::genID( ns , startKey ) );
n.appendTimestamp( "lastmod" , myVersion );
diff --git a/s/d_state.cpp b/s/d_state.cpp
index ad6f3fe3c8b..5d60db47c05 100644
--- a/s/d_state.cpp
+++ b/s/d_state.cpp
@@ -476,7 +476,7 @@ namespace mongo {
string ns = cmdObj["setShardVersion"].valuestrsafe();
if ( ns.size() == 0 ) {
- errmsg = "need to speciy namespace";
+ errmsg = "need to specify namespace";
return false;
}
@@ -493,7 +493,7 @@ namespace mongo {
if ( globalVersion > 0 && version > 0 ) {
// this means there is no reset going on an either side
- // so its safe to make some assuptions
+ // so it's safe to make some assumptions
if ( version == globalVersion ) {
// mongos and mongod agree!
@@ -577,7 +577,7 @@ namespace mongo {
ShardChunkVersion currVersion = version;
if ( ! shardingState.trySetVersion( ns , currVersion ) ) {
- errmsg = str::stream() << "client version differs from config's for colleciton '" << ns << "'";
+ errmsg = str::stream() << "client version differs from config's for collection '" << ns << "'";
result.append( "ns" , ns );
result.appendTimestamp( "version" , version );
result.appendTimestamp( "globalVersion" , currVersion );
@@ -674,7 +674,7 @@ namespace mongo {
if ( version == 0 && clientVersion > 0 ) {
stringstream ss;
- ss << "collection was dropped or this shard no longer valied version: " << version << " clientVersion: " << clientVersion;
+ ss << "collection was dropped or this shard no longer valid version: " << version << " clientVersion: " << clientVersion;
errmsg = ss.str();
return false;
}
diff --git a/s/d_writeback.cpp b/s/d_writeback.cpp
index 0aca4e34708..5fa5aecaf97 100644
--- a/s/d_writeback.cpp
+++ b/s/d_writeback.cpp
@@ -165,7 +165,7 @@ namespace mongo {
void help(stringstream& help) const {
help << "Returns whether there are operations in the writeback queue at the time the command was called. "
- << "This is an internal comand";
+ << "This is an internal command";
}
bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
diff --git a/s/grid.cpp b/s/grid.cpp
index abf85135d71..8ba0d706080 100644
--- a/s/grid.cpp
+++ b/s/grid.cpp
@@ -119,7 +119,7 @@ namespace mongo {
}
bool Grid::addShard( string* name , const ConnectionString& servers , long long maxSize , string& errMsg ) {
- // name can be NULL, so privide a dummy one here to avoid testing it elsewhere
+ // name can be NULL, so provide a dummy one here to avoid testing it elsewhere
string nameInternal;
if ( ! name ) {
name = &nameInternal;
diff --git a/s/shard_version.cpp b/s/shard_version.cpp
index e756a7f2c80..736ed69ab3f 100644
--- a/s/shard_version.cpp
+++ b/s/shard_version.cpp
@@ -101,7 +101,7 @@ namespace mongo {
}
// has the ChunkManager been reloaded since the last time we updated the connection-level version?
- // (ie, last time we issued the setShardVersions below)
+ // (i.e., last time we issued the setShardVersions below)
unsigned long long sequenceNumber = connectionShardStatus.getSequence(&conn,ns);
if ( sequenceNumber == officialSequenceNumber ) {
return false;
diff --git a/s/strategy_shard.cpp b/s/strategy_shard.cpp
index 9d625f11d0a..69611e6241c 100644
--- a/s/strategy_shard.cpp
+++ b/s/strategy_shard.cpp
@@ -177,7 +177,7 @@ namespace mongo {
unsigned long long old = manager->getSequenceNumber();
manager = r.getChunkManager();
- LOG( logLevel ) << " sequenece number - old: " << old << " new: " << manager->getSequenceNumber() << endl;
+ LOG( logLevel ) << " sequence number - old: " << old << " new: " << manager->getSequenceNumber() << endl;
if (!manager) {
keepGoing = false;
diff --git a/s/writeback_listener.cpp b/s/writeback_listener.cpp
index d1429284fcb..a166ce58dbb 100644
--- a/s/writeback_listener.cpp
+++ b/s/writeback_listener.cpp
@@ -94,7 +94,7 @@ namespace mongo {
WBStatus s = _seenWritebacks[ident];
if ( oid < s.id ) {
// this means we're waiting for a GLE that already passed.
- // it should be impossible becauseonce we call GLE, no other
+ // it should be impossible because once we call GLE, no other
// writebacks should happen with that connection id
msgasserted( 14041 , str::stream() << "got writeback waitfor for older id " <<