author     Alberto Lerner <alerner@10gen.com>  2010-05-26 13:18:25 -0400
committer  Alberto Lerner <alerner@10gen.com>  2010-05-26 13:18:25 -0400
commit     2e1f93c5735d1575da1efc3e3f38be9436aea228 (patch)
tree       0751c055292bbd59956a7d5f3e5753fbd6732359
parent     4021dd2187ecc960aa0f348e9c734c4236db656d (diff)
parent     8e4612979e382e7a6cb54a82dcaa88cfba36254e (diff)
download   mongo-2e1f93c5735d1575da1efc3e3f38be9436aea228.tar.gz
Merge branch 'master' of github.com:mongodb/mongo
-rw-r--r--  SConstruct                         |  6
-rw-r--r--  db/dbcommands.cpp                  |  6
-rw-r--r--  db/dbwebserver.cpp                 |  2
-rw-r--r--  jstests/index_many.js              | 98
-rw-r--r--  jstests/index_many2.js             | 29
-rw-r--r--  jstests/slow/sharding_balance1.js  | 56
-rw-r--r--  s/balance.cpp                      | 18
-rw-r--r--  s/chunk.cpp                        |  2
-rw-r--r--  s/chunk.h                          |  8
-rw-r--r--  s/commands_admin.cpp               | 12
-rw-r--r--  s/commands_public.cpp              | 16
-rw-r--r--  s/config.cpp                       | 22
-rw-r--r--  s/config.h                         |  9
-rw-r--r--  s/request.cpp                      |  2
-rw-r--r--  s/request.h                        |  4
-rw-r--r--  s/strategy.cpp                     |  4
-rw-r--r--  s/strategy_shard.cpp               | 10
-rw-r--r--  s/strategy_single.cpp              |  2
-rw-r--r--  util/hashtab.h                     |  7
19 files changed, 207 insertions, 106 deletions
diff --git a/SConstruct b/SConstruct
index 95e3bfda7b6..c196c35a43c 100644
--- a/SConstruct
+++ b/SConstruct
@@ -436,11 +436,13 @@ serverOnlyFiles += coreShardFiles + [ "s/d_logic.cpp" ]
serverOnlyFiles += [ "db/module.cpp" ] + Glob( "db/modules/*.cpp" )
modules = []
+moduleNames = []
for x in os.listdir( "db/modules/" ):
if x.find( "." ) >= 0:
continue
print( "adding module: " + x )
+ moduleNames.append( x )
modRoot = "db/modules/" + x + "/"
serverOnlyFiles += Glob( modRoot + "src/*.cpp" )
modBuildFile = modRoot + "build.py"
@@ -781,6 +783,8 @@ def add_exe(target):
def setupBuildInfoFile( outFile ):
version = getGitVersion()
+ if len(moduleNames) > 0:
+ version = version + " modules: " + ','.join( moduleNames )
sysInfo = getSysInfo()
contents = '\n'.join([
'#include "pch.h"',
@@ -1365,7 +1369,7 @@ testEnv.AlwaysBuild( "startMongodSmallOplog" );
testEnv.SideEffect( "dummySmokeSideEffect", "startMongodSmallOplog" )
def addMongodReqTargets( env, target, source ):
- mongodReqTargets = [ "smokeClient", "smokeJs", "smokeQuota" ]
+ mongodReqTargets = [ "smokeClient", "smokeJs" ]
for target in mongodReqTargets:
testEnv.Depends( target, "startMongod" )
testEnv.Depends( "smokeAll", target )
diff --git a/db/dbcommands.cpp b/db/dbcommands.cpp
index fc3f55b3cce..bbb0f2f3cde 100644
--- a/db/dbcommands.cpp
+++ b/db/dbcommands.cpp
@@ -1833,6 +1833,7 @@ namespace mongo {
returns true if ran a cmd
*/
bool _runCommands(const char *ns, BSONObj& _cmdobj, BufBuilder &b, BSONObjBuilder& anObjBuilder, bool fromRepl, int queryOptions) {
+ cc().curop()->ensureStarted();
string dbname = nsToDatabase( ns );
if( logLevel >= 1 )
@@ -1859,6 +1860,7 @@ namespace mongo {
BSONElement e = jsobj.firstElement();
Command * c = e.type() ? Command::findCommand( e.fieldName() ) : 0;
+
if ( c ){
ok = execCommand( c , client , queryOptions , ns , jsobj , anObjBuilder , fromRepl );
}
@@ -1866,10 +1868,12 @@ namespace mongo {
anObjBuilder.append("errmsg", "no such cmd");
anObjBuilder.append("bad cmd" , _cmdobj );
}
+
anObjBuilder.append("ok", ok);
BSONObj x = anObjBuilder.done();
b.append((void*) x.objdata(), x.objsize());
+
return true;
}
-
+
} // namespace mongo
diff --git a/db/dbwebserver.cpp b/db/dbwebserver.cpp
index ec4189cf036..dff6b265a8a 100644
--- a/db/dbwebserver.cpp
+++ b/db/dbwebserver.cpp
@@ -499,7 +499,7 @@ namespace mongo {
ss << "write locked:</a> " << (dbMutex.info().isLocked() ? "true" : "false") << "\n";
{
Timer t;
- readlocktry lk( "" , 2000 );
+ readlocktry lk( "" , 300 );
if ( lk.got() ){
ss << "time to get readlock: " << t.millis() << "ms\n";
doLockedStuff(ss);
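
Note: shortening the readlocktry timeout from 2000ms to 300ms keeps the HTTP status page responsive: if the write lock is held, the page gives up on the locked statistics after 300ms instead of stalling for two seconds. A minimal standalone sketch of the same try-with-timeout pattern, using std::timed_mutex rather than MongoDB's readlocktry (names here are illustrative, not from the tree):

    #include <chrono>
    #include <iostream>
    #include <mutex>

    std::timed_mutex dbMutex;

    void renderStatusPage() {
        // Give up after 300ms, mirroring readlocktry( "" , 300 ) above,
        // so a long-held write lock cannot stall the status page.
        if ( dbMutex.try_lock_for( std::chrono::milliseconds( 300 ) ) ) {
            std::cout << "got lock, rendering locked stats\n";
            dbMutex.unlock();
        }
        else {
            std::cout << "could not get readlock in 300ms\n";
        }
    }
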
diff --git a/jstests/index_many.js b/jstests/index_many.js
index 6e12485490c..46705a20470 100644
--- a/jstests/index_many.js
+++ b/jstests/index_many.js
@@ -1,51 +1,51 @@
/* test using lots of indexes on one collection */
-t = db.many;
-
-function f() {
-
- t.drop();
- db.many2.drop();
-
- t.save({ x: 9, y : 99 });
- t.save({ x: 19, y : 99 });
-
- x = 2;
- while (x < 70) {
- patt = {};
- patt[x] = 1;
- if (x == 20)
- patt = { x: 1 };
- if (x == 64)
- patt = { y: 1 };
- t.ensureIndex(patt);
- x++;
- }
-
- // print( tojson(db.getLastErrorObj()) );
- assert(db.getLastError(), "should have got an error 'too many indexes'");
-
- // 40 is the limit currently
- lim = t.getIndexes().length;
- if (lim != 64) {
- print("# of indexes should be 64 but is : " + lim);
- return;
- }
- assert(lim == 64, "not 64 indexes");
-
- assert(t.find({ x: 9 }).length() == 1, "b");
- assert(t.find({ x: 9 }).explain().cursor.match(/Btree/), "not using index?");
-
- assert(t.find({ y: 99 }).length() == 2, "y idx");
- assert(t.find({ y: 99 }).explain().cursor.match(/Btree/), "not using y index?");
-
- /* check that renamecollection remaps all the indexes right */
- assert(t.renameCollection("many2").ok, "rename failed");
- assert(t.find({ x: 9 }).length() == 0, "many2a");
- assert(db.many2.find({ x: 9 }).length() == 1, "many2b");
- assert(t.find({ y: 99 }).length() == 0, "many2c");
- assert(db.many2.find({ y: 99 }).length() == 2, "many2d");
-
-}
-
-f();
+t = db.many;
+
+function f() {
+
+ t.drop();
+ db.many2.drop();
+
+ t.save({ x: 9, y : 99 });
+ t.save({ x: 19, y : 99 });
+
+ x = 2;
+ while (x < 70) {
+ patt = {};
+ patt[x] = 1;
+ if (x == 20)
+ patt = { x: 1 };
+ if (x == 64)
+ patt = { y: 1 };
+ t.ensureIndex(patt);
+ x++;
+ }
+
+ // print( tojson(db.getLastErrorObj()) );
+ assert(db.getLastError(), "should have got an error 'too many indexes'");
+
+ // 64 is the limit currently
+ lim = t.getIndexes().length;
+ if (lim != 64) {
+ print("# of indexes should be 64 but is : " + lim);
+ return;
+ }
+ assert(lim == 64, "not 64 indexes");
+
+ assert(t.find({ x: 9 }).length() == 1, "b");
+ assert(t.find({ x: 9 }).explain().cursor.match(/Btree/), "not using index?");
+
+ assert(t.find({ y: 99 }).length() == 2, "y idx");
+ assert(t.find({ y: 99 }).explain().cursor.match(/Btree/), "not using y index?");
+
+ /* check that renamecollection remaps all the indexes right */
+ assert(t.renameCollection("many2").ok, "rename failed");
+ assert(t.find({ x: 9 }).length() == 0, "many2a");
+ assert(db.many2.find({ x: 9 }).length() == 1, "many2b");
+ assert(t.find({ y: 99 }).length() == 0, "many2c");
+ assert(db.many2.find({ y: 99 }).length() == 2, "many2d");
+
+}
+
+f();
diff --git a/jstests/index_many2.js b/jstests/index_many2.js
new file mode 100644
index 00000000000..3fca5f538f2
--- /dev/null
+++ b/jstests/index_many2.js
@@ -0,0 +1,29 @@
+
+t = db.index_many2;
+t.drop()
+
+t.save( { x : 1 } )
+
+assert.eq( 1 , t.getIndexKeys().length , "A1" )
+
+function make( n ){
+ var x = {}
+ x["x"+n] = 1;
+ return x;
+}
+
+for ( i=1; i<1000; i++ ){
+ t.ensureIndex( make(i) );
+}
+
+assert.eq( 64 , t.getIndexKeys().length , "A2" )
+
+
+num = t.getIndexKeys().length
+
+t.dropIndex( make(num-1) )
+assert.eq( num - 1 , t.getIndexKeys().length , "B0" )
+
+t.ensureIndex( { z : 1 } )
+assert.eq( num , t.getIndexKeys().length , "B1" )
+
diff --git a/jstests/slow/sharding_balance1.js b/jstests/slow/sharding_balance1.js
new file mode 100644
index 00000000000..184864a2f9f
--- /dev/null
+++ b/jstests/slow/sharding_balance1.js
@@ -0,0 +1,56 @@
+// sharding_balance1.js
+
+
+s = new ShardingTest( "slow_sharding_balance1" , 2 , 2 , 1 , { chunksize : 1 } )
+
+s.adminCommand( { enablesharding : "test" } );
+s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
+assert.eq( 1 , s.config.chunks.count() , "setup1" );
+
+s.config.settings.find().forEach( printjson )
+
+db = s.getDB( "test" );
+
+bigString = ""
+while ( bigString.length < 10000 )
+ bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
+
+inserted = 0;
+num = 0;
+while ( inserted < ( 20 * 1024 * 1024 ) ){
+ db.foo.insert( { _id : num++ , s : bigString } );
+ inserted += bigString.length;
+}
+
+db.getLastError();
+assert.lt( 20 , s.config.chunks.count() , "setup2" );
+
+function dist(){
+ var x = {}
+ s.config.chunks.find( { ns : "test.foo" } ).forEach(
+ function(z){
+ if ( x[z.shard] )
+ x[z.shard]++
+ else
+ x[z.shard] = 1;
+ }
+ );
+ return x;
+}
+
+function diff(){
+ var x = dist();
+ printjson( x )
+ return Math.max( x.shard0 , x.shard1 ) - Math.min( x.shard0 , x.shard1 );
+}
+
+assert.lt( 20 , diff() );
+print( diff() )
+
+assert.soon( function(){
+ var d = diff();
+ return d < 5;
+} , "balance didn't happen" , 1000 * 60 * 3 , 5000 );
+
+
+s.stop();
diff --git a/s/balance.cpp b/s/balance.cpp
index fdaa839aa73..01be4c1ef8f 100644
--- a/s/balance.cpp
+++ b/s/balance.cpp
@@ -37,11 +37,11 @@ namespace mongo {
bool Balancer::shouldIBalance( DBClientBase& conn ){
BSONObj x = conn.findOne( ShardNS::settings , BSON( "_id" << "balancer" ) );
- log(3) << "balancer: " << x << endl;
+ log(2) << "balancer: " << x << endl;
if ( ! x.isEmpty() ){
if ( x["who"].String() == _myid ){
- log(3) << "balancer: i'm the current balancer" << endl;
+ log(2) << "balancer: i'm the current balancer" << endl;
return true;
}
@@ -49,7 +49,7 @@ namespace mongo {
massert( 13125 , (string)"can't find mongos: " + x["who"].String() , ! other.isEmpty() );
int secsSincePing = (int)(( jsTime() - other["ping"].Date() ) / 1000 );
- log(3) << "current balancer is: " << other << " ping delay(secs): " << secsSincePing << endl;
+ log(2) << "current balancer is: " << other << " ping delay(secs): " << secsSincePing << endl;
if ( secsSincePing < ( 60 * 10 ) ){
return false;
@@ -101,7 +101,7 @@ namespace mongo {
BSONElement shardedColls = db["sharded"];
if ( shardedColls.eoo() ){
- log(3) << "balancer: skipping database with no sharded collection ("
+ log(2) << "balancer: skipping database with no sharded collection ("
<< db["_id"].str() << ")" << endl;
continue;
}
@@ -121,7 +121,7 @@ namespace mongo {
}
bool Balancer::balance( DBClientBase& conn , const string& ns , const BSONObj& data ){
- log(4) << "balancer: balance(" << ns << ")" << endl;
+ log(3) << "balancer: balance(" << ns << ")" << endl;
map< string,vector<BSONObj> > shards;
{
@@ -164,8 +164,8 @@ namespace mongo {
}
}
- log(6) << "min: " << min.first << "\t" << min.second << endl;
- log(6) << "max: " << max.first << "\t" << max.second << endl;
+ log(4) << "min: " << min.first << "\t" << min.second << endl;
+ log(4) << "max: " << max.first << "\t" << max.second << endl;
if( (int)( max.second - min.second) < ( _balancedLastTime ? 2 : 8 ) )
return false;
@@ -179,7 +179,7 @@ namespace mongo {
DBConfig * cfg = grid.getDBConfig( ns );
assert( cfg );
- ChunkManager * cm = cfg->getChunkManager( ns );
+ ChunkManagerPtr cm = cfg->getChunkManager( ns );
assert( cm );
ChunkPtr c = cm->findChunk( chunkToMove["min"].Obj() );
@@ -287,7 +287,7 @@ namespace mongo {
checkOIDs();
while ( ! inShutdown() ){
- sleepsecs( 15 );
+ sleepsecs( 10 );
try {
ScopedDbConnection conn( configServer.getPrimary() );
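
Note: the migration trigger earlier in this file, ( max.second - min.second ) < ( _balancedLastTime ? 2 : 8 ), is a simple hysteresis: a gap of at least 8 chunks between the fullest and emptiest shard is needed to start balancing, but only 2 once a round has already run, so the balancer converges without reacting to tiny imbalances. A self-contained sketch of that rule (illustrative names, not the actual Balancer API):

    #include <iostream>

    // True when the chunk-count gap justifies moving a chunk.
    bool shouldMove( int maxChunks , int minChunks , bool balancedLastTime ) {
        int threshold = balancedLastTime ? 2 : 8;
        return ( maxChunks - minChunks ) >= threshold;
    }

    int main() {
        std::cout << shouldMove( 30 , 25 , false ) << "\n"; // 0: gap 5 < 8
        std::cout << shouldMove( 30 , 25 , true )  << "\n"; // 1: gap 5 >= 2
    }
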
diff --git a/s/chunk.cpp b/s/chunk.cpp
index 503c046834b..70bae695a14 100644
--- a/s/chunk.cpp
+++ b/s/chunk.cpp
@@ -770,7 +770,7 @@ namespace mongo {
}
}
- void ChunkManager::drop(){
+ void ChunkManager::drop( ChunkManagerPtr me ){
rwlock lk( _lock , true );
uassert( 10174 , "config servers not all up" , configServer.allUp() );
diff --git a/s/chunk.h b/s/chunk.h
index 48039703100..3129cab328a 100644
--- a/s/chunk.h
+++ b/s/chunk.h
@@ -29,6 +29,7 @@
#include "../bson/util/atomic_int.h"
#include "shardkey.h"
#include "shard.h"
+#include "config.h"
namespace mongo {
@@ -300,8 +301,11 @@ namespace mongo {
unsigned long long getSequenceNumber(){
return _sequenceNumber;
}
-
- void drop();
+
+ /**
+ * @param me - so i don't get deleted before i'm done
+ */
+ void drop( ChunkManagerPtr me );
private:
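
Note: the new drop( ChunkManagerPtr me ) signature is a keep-alive idiom: the caller passes in the shared_ptr that owns the manager, so the reference count cannot reach zero while drop() is still executing, even if the config's map releases its own copy during the call (see the i->second->drop( i->second ) call site in config.cpp below). A minimal sketch of the idiom, assuming a standard shared_ptr and hypothetical names:

    #include <iostream>
    #include <map>
    #include <memory>
    #include <string>

    struct Manager;
    typedef std::shared_ptr<Manager> ManagerPtr;

    std::map<std::string, ManagerPtr> registry;  // stand-in for DBConfig::_shards

    struct Manager {
        // 'me' pins this object for the duration of the call, even if
        // the registry drops the only other reference mid-drop.
        void drop( ManagerPtr me ) {
            registry.clear();                    // last external ref released
            std::cout << "still alive\n";        // safe: 'me' keeps us alive
        }                                        // may be destroyed on return
    };

    int main() {
        registry["test.foo"].reset( new Manager() );
        registry["test.foo"]->drop( registry["test.foo"] );
    }
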
diff --git a/s/commands_admin.cpp b/s/commands_admin.cpp
index 0569ee2d57c..c5cff2a274e 100644
--- a/s/commands_admin.cpp
+++ b/s/commands_admin.cpp
@@ -401,7 +401,7 @@ namespace mongo {
return false;
}
- ChunkManager * cm = config->getChunkManager( ns );
+ ChunkManagerPtr cm = config->getChunkManager( ns );
if ( ! cm ){
errmsg = "no chunk manager?";
return false;
@@ -424,7 +424,7 @@ namespace mongo {
;
}
- virtual bool _split( BSONObjBuilder& result , string&errmsg , const string& ns , ChunkManager * manager , ChunkPtr old , BSONObj middle ) = 0;
+ virtual bool _split( BSONObjBuilder& result , string&errmsg , const string& ns , ChunkManagerPtr manager , ChunkPtr old , BSONObj middle ) = 0;
bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
ShardConnection::sync();
@@ -451,7 +451,7 @@ namespace mongo {
}
}
- ChunkManager * info = config->getChunkManager( ns );
+ ChunkManagerPtr info = config->getChunkManager( ns );
ChunkPtr old = info->findChunk( find );
return _split( result , errmsg , ns , info , old , cmdObj.getObjectField( "middle" ) );
@@ -464,7 +464,7 @@ namespace mongo {
class SplitValueCommand : public SplitCollectionHelper {
public:
SplitValueCommand() : SplitCollectionHelper( "splitvalue" ){}
- virtual bool _split( BSONObjBuilder& result , string& errmsg , const string& ns , ChunkManager * manager , ChunkPtr old , BSONObj middle ){
+ virtual bool _split( BSONObjBuilder& result , string& errmsg , const string& ns , ChunkManagerPtr manager , ChunkPtr old , BSONObj middle ){
result << "shardinfo" << old->toString();
@@ -484,7 +484,7 @@ namespace mongo {
class SplitCollection : public SplitCollectionHelper {
public:
SplitCollection() : SplitCollectionHelper( "split" ){}
- virtual bool _split( BSONObjBuilder& result , string& errmsg , const string& ns , ChunkManager * manager , ChunkPtr old , BSONObj middle ){
+ virtual bool _split( BSONObjBuilder& result , string& errmsg , const string& ns , ChunkManagerPtr manager , ChunkPtr old , BSONObj middle ){
assert( old.get() );
log() << "splitting: " << ns << " shard: " << old << endl;
@@ -537,7 +537,7 @@ namespace mongo {
tlog() << "CMD: movechunk: " << cmdObj << endl;
- ChunkManager * info = config->getChunkManager( ns );
+ ChunkManagerPtr info = config->getChunkManager( ns );
ChunkPtr c = info->findChunk( find );
const Shard& from = c->getShard();
diff --git a/s/commands_public.cpp b/s/commands_public.cpp
index 18b8fa60014..34250412ab6 100644
--- a/s/commands_public.cpp
+++ b/s/commands_public.cpp
@@ -101,10 +101,10 @@ namespace mongo {
return passthrough( conf , cmdObj , result );
}
- ChunkManager * cm = conf->getChunkManager( fullns );
+ ChunkManagerPtr cm = conf->getChunkManager( fullns );
massert( 10418 , "how could chunk manager be null!" , cm );
- cm->drop();
+ cm->drop( cm );
return 1;
}
@@ -184,7 +184,7 @@ namespace mongo {
return true;
}
- ChunkManager * cm = conf->getChunkManager( fullns );
+ ChunkManagerPtr cm = conf->getChunkManager( fullns );
massert( 10419 , "how could chunk manager be null!" , cm );
vector<shared_ptr<ChunkRange> > chunks;
@@ -216,7 +216,7 @@ namespace mongo {
}
result.appendBool("sharded", true);
- ChunkManager * cm = conf->getChunkManager( fullns );
+ ChunkManagerPtr cm = conf->getChunkManager( fullns );
massert( 12594 , "how could chunk manager be null!" , cm );
set<Shard> servers;
@@ -276,7 +276,7 @@ namespace mongo {
return passthrough( conf , cmdObj , result);
}
- ChunkManager * cm = conf->getChunkManager( fullns );
+ ChunkManagerPtr cm = conf->getChunkManager( fullns );
massert( 13002 , "how could chunk manager be null!" , cm );
vector<shared_ptr<ChunkRange> > chunks;
@@ -390,7 +390,7 @@ namespace mongo {
return passthrough( conf , cmdObj , result );
}
- ChunkManager * cm = conf->getChunkManager( fullns );
+ ChunkManagerPtr cm = conf->getChunkManager( fullns );
massert( 10420 , "how could chunk manager be null!" , cm );
vector<shared_ptr<ChunkRange> > chunks;
@@ -456,7 +456,7 @@ namespace mongo {
return passthrough( conf , cmdObj , result );
}
- ChunkManager * cm = conf->getChunkManager( fullns );
+ ChunkManagerPtr cm = conf->getChunkManager( fullns );
massert( 13091 , "how could chunk manager be null!" , cm );
uassert( 13092 , "GridFS chunks collection can only be sharded on files_id", cm->getShardKey().key() == BSON("files_id" << 1));
@@ -524,7 +524,7 @@ namespace mongo {
BSONObjBuilder timingBuilder;
- ChunkManager * cm = conf->getChunkManager( fullns );
+ ChunkManagerPtr cm = conf->getChunkManager( fullns );
BSONObj q;
if ( cmdObj["query"].type() == Object ){
diff --git a/s/config.cpp b/s/config.cpp
index d68007a7bbe..866ae5f8e7e 100644
--- a/s/config.cpp
+++ b/s/config.cpp
@@ -71,13 +71,13 @@ namespace mongo {
_shardingEnabled = true;
}
- ChunkManager* DBConfig::shardCollection( const string& ns , ShardKeyPattern fieldsAndOrder , bool unique ){
+ ChunkManagerPtr DBConfig::shardCollection( const string& ns , ShardKeyPattern fieldsAndOrder , bool unique ){
if ( ! _shardingEnabled )
throw UserException( 8042 , "db doesn't have sharding enabled" );
scoped_lock lk( _lock );
- ChunkManager * info = _shards[ns];
+ ChunkManagerPtr info = _shards[ns];
if ( info )
return info;
@@ -87,7 +87,7 @@ namespace mongo {
log() << "enable sharding on: " << ns << " with shard key: " << fieldsAndOrder << endl;
_sharded[ns] = CollectionInfo( fieldsAndOrder , unique );
- info = new ChunkManager( this , ns , fieldsAndOrder , unique );
+ info.reset( new ChunkManager( this , ns , fieldsAndOrder , unique ) );
_shards[ns] = info;
return info;
@@ -100,7 +100,7 @@ namespace mongo {
scoped_lock lk( _lock );
- ChunkManager * info = _shards[ns];
+ ChunkManagerPtr info = _shards[ns];
map<string,CollectionInfo>::iterator i = _sharded.find( ns );
if ( info == 0 && i == _sharded.end() ){
@@ -110,21 +110,21 @@ namespace mongo {
uassert( 10180 , "info but no sharded" , i != _sharded.end() );
_sharded.erase( i );
- _shards.erase( ns ); // TODO: clean this up, maybe switch to shared_ptr
+ _shards.erase( ns );
return true;
}
- ChunkManager* DBConfig::getChunkManager( const string& ns , bool reload ){
+ ChunkManagerPtr DBConfig::getChunkManager( const string& ns , bool reload ){
scoped_lock lk( _lock );
- ChunkManager* m = _shards[ns];
+ ChunkManagerPtr m = _shards[ns];
if ( m && ! reload )
return m;
uassert( 10181 , (string)"not sharded:" + ns , _isSharded( ns ) );
if ( m && reload )
log() << "reloading shard info for: " << ns << endl;
- m = new ChunkManager( this , ns , _sharded[ ns ].key , _sharded[ns].unique );
+ m.reset( new ChunkManager( this , ns , _sharded[ ns ].key , _sharded[ns].unique ) );
_shards[ns] = m;
return m;
}
@@ -176,7 +176,7 @@ namespace mongo {
Model::save( check );
scoped_lock lk( _lock );
- for ( map<string,ChunkManager*>::iterator i=_shards.begin(); i != _shards.end(); i++)
+ for ( map<string,ChunkManagerPtr>::iterator i=_shards.begin(); i != _shards.end(); i++)
i->second->save();
}
@@ -261,7 +261,7 @@ namespace mongo {
num = 0;
set<string> seen;
while ( true ){
- map<string,ChunkManager*>::iterator i = _shards.begin();
+ map<string,ChunkManagerPtr>::iterator i = _shards.begin();
if ( i == _shards.end() )
break;
@@ -275,7 +275,7 @@ namespace mongo {
log(1) << "\t dropping sharded collection: " << i->first << endl;
i->second->getAllShards( allServers );
- i->second->drop();
+ i->second->drop( i->second );
num++;
uassert( 10184 , "_dropShardedCollections too many collections - bailing" , num < 100000 );
diff --git a/s/config.h b/s/config.h
index 6c3ae6ad5ae..4a7c6a171dd 100644
--- a/s/config.h
+++ b/s/config.h
@@ -47,7 +47,8 @@ namespace mongo {
extern Grid grid;
class ChunkManager;
-
+ typedef shared_ptr<ChunkManager> ChunkManagerPtr;
+
class CollectionInfo {
public:
CollectionInfo( ShardKeyPattern _key = BSONObj() , bool _unique = false ) :
@@ -76,14 +77,14 @@ namespace mongo {
}
void enableSharding();
- ChunkManager* shardCollection( const string& ns , ShardKeyPattern fieldsAndOrder , bool unique );
+ ChunkManagerPtr shardCollection( const string& ns , ShardKeyPattern fieldsAndOrder , bool unique );
/**
* @return whether or not this partition is partitioned
*/
bool isSharded( const string& ns );
- ChunkManager* getChunkManager( const string& ns , bool reload = false );
+ ChunkManagerPtr getChunkManager( const string& ns , bool reload = false );
/**
* @return the correct for shard for the ns
@@ -135,7 +136,7 @@ namespace mongo {
bool _shardingEnabled;
map<string,CollectionInfo> _sharded; // { "alleyinsider.blog.posts" : { ts : 1 } , ... ] - all ns that are sharded
- map<string,ChunkManager*> _shards; // this will only have entries for things that have been looked at
+ map<string,ChunkManagerPtr> _shards; // this will only have entries for things that have been looked at
mongo::mutex _lock; // TODO: change to r/w lock ??
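
Note: the ChunkManagerPtr typedef and the map<string,ChunkManagerPtr> _shards member resolve the old "maybe switch to shared_ptr" TODO in config.cpp: getChunkManager() can now reset() the map entry to a freshly loaded ChunkManager while a request that already holds the previous pointer keeps a valid object until it lets go. A hedged sketch of that lifetime behavior (struct layout is illustrative, not the real ChunkManager):

    #include <cassert>
    #include <map>
    #include <memory>
    #include <string>

    struct ChunkManager { int version; };
    typedef std::shared_ptr<ChunkManager> ChunkManagerPtr;

    int main() {
        std::map<std::string, ChunkManagerPtr> shards;
        shards["test.foo"].reset( new ChunkManager{ 1 } );

        ChunkManagerPtr inUse = shards["test.foo"];    // a request grabs it

        // reload: replace the map entry with a new manager, as
        // DBConfig::getChunkManager( ns , true ) does
        shards["test.foo"].reset( new ChunkManager{ 2 } );

        assert( inUse->version == 1 );                 // old copy still valid
        assert( shards["test.foo"]->version == 2 );    // new readers see v2
        return 0;
    }
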
diff --git a/s/request.cpp b/s/request.cpp
index 96c9c461cca..b048b743a2c 100644
--- a/s/request.cpp
+++ b/s/request.cpp
@@ -58,7 +58,7 @@ namespace mongo {
uassert( 10193 , (string)"no shard info for: " + getns() , _chunkManager );
}
else {
- _chunkManager = 0;
+ _chunkManager.reset();
}
_m.header()->id = _id;
diff --git a/s/request.h b/s/request.h
index 5a5bf638854..a1c7e9e2646 100644
--- a/s/request.h
+++ b/s/request.h
@@ -59,7 +59,7 @@ namespace mongo {
return _config->isShardingEnabled();
}
- ChunkManager * getChunkManager() const {
+ ChunkManagerPtr getChunkManager() const {
return _chunkManager;
}
@@ -99,7 +99,7 @@ namespace mongo {
MSGID _id;
DBConfig * _config;
- ChunkManager * _chunkManager;
+ ChunkManagerPtr _chunkManager;
int _clientId;
ClientInfo * _clientInfo;
diff --git a/s/strategy.cpp b/s/strategy.cpp
index 8c2e4896b27..b1d7351de08 100644
--- a/s/strategy.cpp
+++ b/s/strategy.cpp
@@ -170,7 +170,7 @@ namespace mongo {
ShardChunkVersion version = 0;
unsigned long long officialSequenceNumber = 0;
- ChunkManager * manager = 0;
+ ChunkManagerPtr manager;
if ( conf->isSharded( ns ) ){
manager = conf->getChunkManager( ns , authoritative );
officialSequenceNumber = manager->getSequenceNumber();
@@ -183,7 +183,7 @@ namespace mongo {
log(2) << " have to set shard version for conn: " << &conn << " ns:" << ns
<< " my last seq: " << sequenceNumber << " current: " << officialSequenceNumber
- << " version: " << version << " manager: " << manager
+ << " version: " << version << " manager: " << manager.get()
<< endl;
BSONObj result;
diff --git a/s/strategy_shard.cpp b/s/strategy_shard.cpp
index 0d2043ca8d0..6dcb3c67c97 100644
--- a/s/strategy_shard.cpp
+++ b/s/strategy_shard.cpp
@@ -39,7 +39,7 @@ namespace mongo {
if ( q.ntoreturn == 1 && strstr(q.ns, ".$cmd") )
throw UserException( 8010 , "something is wrong, shouldn't see a command here" );
- ChunkManager * info = r.getChunkManager();
+ ChunkManagerPtr info = r.getChunkManager();
assert( info );
Query query( q.query );
@@ -125,7 +125,7 @@ namespace mongo {
cursorCache.remove( id );
}
- void _insert( Request& r , DbMessage& d, ChunkManager* manager ){
+ void _insert( Request& r , DbMessage& d, ChunkManagerPtr manager ){
while ( d.moreJSObjs() ){
BSONObj o = d.nextJsObj();
@@ -158,7 +158,7 @@ namespace mongo {
}
}
- void _update( Request& r , DbMessage& d, ChunkManager* manager ){
+ void _update( Request& r , DbMessage& d, ChunkManagerPtr manager ){
int flags = d.pullInt();
BSONObj query = d.nextJsObj();
@@ -233,7 +233,7 @@ namespace mongo {
}
- void _delete( Request& r , DbMessage& d, ChunkManager* manager ){
+ void _delete( Request& r , DbMessage& d, ChunkManagerPtr manager ){
int flags = d.pullInt();
bool justOne = flags & 1;
@@ -267,7 +267,7 @@ namespace mongo {
log(3) << "write: " << ns << endl;
DbMessage& d = r.d();
- ChunkManager * info = r.getChunkManager();
+ ChunkManagerPtr info = r.getChunkManager();
assert( info );
if ( op == dbInsert ){
diff --git a/s/strategy_single.cpp b/s/strategy_single.cpp
index b5e6d2c5248..49a2243869d 100644
--- a/s/strategy_single.cpp
+++ b/s/strategy_single.cpp
@@ -110,7 +110,7 @@ namespace mongo {
! o["unique"].trueValue() ||
r.getConfig()->getChunkManager( ns )->getShardKey().uniqueAllowd( newIndexKey ) );
- ChunkManager * cm = r.getConfig()->getChunkManager( ns );
+ ChunkManagerPtr cm = r.getConfig()->getChunkManager( ns );
assert( cm );
for ( int i=0; i<cm->numChunks();i++)
doWrite( op , r , cm->getChunk(i)->getShard() );
diff --git a/util/hashtab.h b/util/hashtab.h
index c9b4e438dba..80a7241140a 100644
--- a/util/hashtab.h
+++ b/util/hashtab.h
@@ -108,8 +108,11 @@ namespace mongo {
_buf = buf;
//nodes = (Node *) buf;
- assert( sizeof(Node) == 628 );
- //out() << "HashTable() " << _name << " sizeof(node):" << sizeof(Node) << " n:" << n << endl;
+ if ( sizeof(Node) != 628 ){
+ out() << "HashTable() " << _name << " sizeof(node):" << sizeof(Node) << " n:" << n << endl;
+ assert( sizeof(Node) == 628 );
+ }
+
}
Type* get(const Key& k) {
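
Note: the hashtab.h change keeps the sizeof(Node) == 628 invariant but now prints the table name and the actual node size before asserting, so a failure on an unexpected platform or packing setting logs enough context to diagnose the layout mismatch. The log-before-assert pattern, sketched generically:

    #include <cassert>
    #include <cstddef>
    #include <iostream>

    template <typename Node>
    void checkNodeSize( const char* name , size_t expected ) {
        if ( sizeof(Node) != expected ) {
            // Log the specifics first; a bare assert would lose them.
            std::cerr << "HashTable() " << name
                      << " sizeof(node): " << sizeof(Node)
                      << " expected: " << expected << std::endl;
            assert( sizeof(Node) == expected );
        }
    }
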