author    Dwight <dmerriman@gmail.com>  2009-09-01 16:03:41 -0400
committer Dwight <dmerriman@gmail.com>  2009-09-01 16:03:41 -0400
commit    777c1883c3d019e1318703eb324fa55dff325ad8 (patch)
tree      569651b4b080fb1b33ecbc5395221e7e0e305a67
parent    10fc6958709b017ca1c7db5f2f6ef44a84c3b25d (diff)
parent    184870bf95a09ae696d0c1e2326d5145186b1170 (diff)

Merge branch 'master' of git@github.com:mongodb/mongo
 db/dbcommands.cpp                 |   4
 jstests/sharding/auto1.js         |   8
 jstests/sharding/auto2.js         |   2
 jstests/sharding/count1.js        |   4
 jstests/sharding/diffservers1.js  |   3
 jstests/sharding/key_string.js    |   4
 jstests/sharding/moveshard1.js    |  12
 jstests/sharding/shard1.js        |   4
 jstests/sharding/shard2.js        |  20
 jstests/sharding/shard3.js        |   2
 jstests/sharding/shard4.js        |   4
 jstests/sharding/shard5.js        |   4
 jstests/sharding/splitpick.js     |   2
 s/chunk.cpp                       |  58
 s/chunk.h                         |  10
 s/commands_admin.cpp              | 105
 s/commands_public.cpp             |   2
 s/config.cpp                      |  42
 s/config.h                        |  28
 s/d_logic.cpp                     |  12
 s/request.cpp                     |   8
 s/server.cpp                      |   2
 s/strategy.cpp                    |   2
 s/strategy_shard.cpp              |  22
 scripting/engine_spidermonkey.cpp |  23
 shell/dbshell.cpp                 |  10
 26 files changed, 217 insertions(+), 180 deletions(-)
diff --git a/db/dbcommands.cpp b/db/dbcommands.cpp
index 06f9103723f..b6f68c368c1 100644
--- a/db/dbcommands.cpp
+++ b/db/dbcommands.cpp
@@ -1358,8 +1358,10 @@ namespace mongo {
}
}
- if ( !valid )
+ if ( !valid ){
anObjBuilder.append("errmsg", "no such cmd");
+ anObjBuilder.append("bad cmd" , _cmdobj );
+ }
anObjBuilder.append("ok", ok?1.0:0.0);
BSONObj x = anObjBuilder.done();
b.append((void*) x.objdata(), x.objsize());
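
A small diagnostics tweak: an unrecognized command now echoes the offending command object back in the reply, next to the existing errmsg. A sketch of the new response shape (the command name is made up and the output formatting is illustrative, not captured from a real run):

    // "nosuchcmd" is a made-up name; fields follow the hunk above
    > db.runCommand( { nosuchcmd : 1 } )
    { "errmsg" : "no such cmd" , "bad cmd" : { "nosuchcmd" : 1 } , "ok" : 0 }
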
diff --git a/jstests/sharding/auto1.js b/jstests/sharding/auto1.js
index e1cab3d27bf..9f53ad060c5 100644
--- a/jstests/sharding/auto1.js
+++ b/jstests/sharding/auto1.js
@@ -22,7 +22,7 @@ s.adminCommand( "connpoolsync" );
primary = s.getServer( "test" ).getDB( "test" );
-assert.eq( 1 , s.config.shard.count() );
+assert.eq( 1 , s.config.chunks.count() );
assert.eq( 500 , primary.foo.count() );
print( "datasize: " + tojson( s.getServer( "test" ).getDB( "admin" ).runCommand( { datasize : "test.foo" } ) ) );
@@ -31,20 +31,20 @@ for ( ; i<800; i++ ){
coll.save( { num : i , s : bigString } );
}
-assert.eq( 1 , s.config.shard.count() );
+assert.eq( 1 , s.config.chunks.count() );
for ( ; i<1500; i++ ){
coll.save( { num : i , s : bigString } );
}
-assert.eq( 3 , s.config.shard.count() , "shard didn't split A " );
+assert.eq( 3 , s.config.chunks.count() , "shard didn't split A " );
s.printShards();
for ( ; i<3000; i++ ){
coll.save( { num : i , s : bigString } );
}
-assert.eq( 4 , s.config.shard.count() , "shard didn't split B " );
+assert.eq( 4 , s.config.chunks.count() , "shard didn't split B " );
s.printShards();
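
The auto tests now read chunk metadata from config.chunks instead of config.shard, and each chunk points at a shard rather than a server. A quick way to eyeball the same counts from a mongos shell (a sketch; the document values shown are placeholders):

    // placeholders: namespace and host are illustrative
    conf = db.getMongo().getDB( "config" );
    conf.chunks.count();     // one document per chunk, as asserted above
    conf.chunks.findOne();   // e.g. { ns : "test.foo" , min : ... , max : ... , shard : "localhost:30000" }
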
diff --git a/jstests/sharding/auto2.js b/jstests/sharding/auto2.js
index e59e2632e2f..cc400339b54 100644
--- a/jstests/sharding/auto2.js
+++ b/jstests/sharding/auto2.js
@@ -39,6 +39,6 @@ countb = s._connections[1].getDB( "test" ).foo.count();
assert.eq( j * 100 , counta + countb , "from each a:" + counta + " b:" + countb + " i:" + i );
assert.eq( j * 100 , coll.find().limit(100000000).itcount() , "itcount A" );
-assert( Array.unique( s.config.shard.find().toArray().map( function(z){ return z.server; } ) ).length == 2 , "should be using both servers" );
+assert( Array.unique( s.config.chunks.find().toArray().map( function(z){ return z.shard; } ) ).length == 2 , "should be using both servers" );
s.stop();
diff --git a/jstests/sharding/count1.js b/jstests/sharding/count1.js
index 70c159fde00..a69716263a7 100644
--- a/jstests/sharding/count1.js
+++ b/jstests/sharding/count1.js
@@ -17,7 +17,7 @@ s.adminCommand( { shardcollection : "test.foo" , key : { name : 1 } } );
primary = s.getServer( "test" ).getDB( "test" );
seconday = s.getOther( primary ).getDB( "test" );
-assert.eq( 1 , s.config.shard.count() , "sanity check A" );
+assert.eq( 1 , s.config.chunks.count() , "sanity check A" );
db.foo.save( { name : "eliot" } )
db.foo.save( { name : "sara" } )
@@ -35,7 +35,7 @@ s.adminCommand( { split : "test.foo" , find : { name : "joe" } } );
assert.eq( 6 , db.foo.find().count() , "basic count after split " );
assert.eq( 6 , db.foo.find().sort( { name : 1 } ).count() , "basic count after split sorted " );
-s.adminCommand( { moveshard : "test.foo" , find : { name : "joe" } , to : seconday.getMongo().name } );
+s.adminCommand( { movechunk : "test.foo" , find : { name : "joe" } , to : seconday.getMongo().name } );
assert.eq( 3 , primary.foo.find().toArray().length , "primary count" );
assert.eq( 3 , seconday.foo.find().toArray().length , "secondary count" );
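
count1.js also picks up the moveshard → movechunk rename for the cluster-level admin command; the shape of the call is unchanged. For reference (the host below is a placeholder):

    // placeholder host; same form as the tests above
    s.adminCommand( { movechunk : "test.foo" , find : { name : "joe" } , to : "localhost:30001" } );
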
diff --git a/jstests/sharding/diffservers1.js b/jstests/sharding/diffservers1.js
index 897d3735339..1a2f221fdaf 100644
--- a/jstests/sharding/diffservers1.js
+++ b/jstests/sharding/diffservers1.js
@@ -12,5 +12,8 @@ test1.save( { a : 2 } );
test1.save( { a : 3 } );
assert( 3 , test1.count() );
+assert( ! s.admin.runCommand( { addshard: "sdd$%" } ).ok , "bad hostname" );
+assert( ! s.admin.runCommand( { addshard: "127.0.0.1:43415" } ).ok , "host not up" );
+
s.stop();
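
These asserts exercise the stricter addshard path added in s/commands_admin.cpp below: mongos now opens a connection to the candidate shard before writing it to config.shards, so a bogus hostname or a down server is rejected. A sketch of the failure reply, inferred from the code rather than captured from a run (the exception text will vary):

    > db.getSisterDB( "admin" ).runCommand( { addshard : "127.0.0.1:43415" } )
    { "host" : "127.0.0.1:43415" , "exception" : "..." , "ok" : 0 , "errmsg" : "couldn't connect to new shard" }
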
diff --git a/jstests/sharding/key_string.js b/jstests/sharding/key_string.js
index 78ce3f4ad90..d3e85e0d013 100644
--- a/jstests/sharding/key_string.js
+++ b/jstests/sharding/key_string.js
@@ -9,7 +9,7 @@ s.adminCommand( { shardcollection : "test.foo" , key : { name : 1 } } );
primary = s.getServer( "test" ).getDB( "test" );
seconday = s.getOther( primary ).getDB( "test" );
-assert.eq( 1 , s.config.shard.count() , "sanity check A" );
+assert.eq( 1 , s.config.chunks.count() , "sanity check A" );
db.foo.save( { name : "eliot" } )
db.foo.save( { name : "sara" } )
@@ -24,7 +24,7 @@ s.adminCommand( { split : "test.foo" , find : { name : "joe" } } );
s.adminCommand( { split : "test.foo" , find : { name : "joe" } } );
s.adminCommand( { split : "test.foo" , find : { name : "joe" } } );
-s.adminCommand( { moveshard : "test.foo" , find : { name : "joe" } , to : seconday.getMongo().name } );
+s.adminCommand( { movechunk : "test.foo" , find : { name : "joe" } , to : seconday.getMongo().name } );
s.printShards();
diff --git a/jstests/sharding/moveshard1.js b/jstests/sharding/moveshard1.js
index 18c9c1c9edf..b074b4c987d 100644
--- a/jstests/sharding/moveshard1.js
+++ b/jstests/sharding/moveshard1.js
@@ -1,6 +1,6 @@
-// moveshard1.js
+// movechunk1.js
-s = new ShardingTest( "moveshard1" , 2 );
+s = new ShardingTest( "movechunk1" , 2 );
l = s._connections[0];
r = s._connections[1];
@@ -15,19 +15,19 @@ ldb.things.save( { a : 3 } )
assert.eq( ldb.things.count() , 3 );
assert.eq( rdb.things.count() , 0 );
-startResult = l.getDB( "admin" ).runCommand( { "moveshard.start" : "foo.things" ,
+startResult = l.getDB( "admin" ).runCommand( { "movechunk.start" : "foo.things" ,
"to" : s._serverNames[1] ,
"from" : s._serverNames[0] ,
filter : { a : { $gt : 2 } }
} );
-print( "moveshard.start: " + tojson( startResult ) );
+print( "movechunk.start: " + tojson( startResult ) );
assert( startResult.ok == 1 , "start failed!" );
-finishResult = l.getDB( "admin" ).runCommand( { "moveshard.finish" : "foo.things" ,
+finishResult = l.getDB( "admin" ).runCommand( { "movechunk.finish" : "foo.things" ,
finishToken : startResult.finishToken ,
to : s._serverNames[1] ,
newVersion : 1 } );
-print( "moveshard.finish: " + tojson( finishResult ) );
+print( "movechunk.finish: " + tojson( finishResult ) );
assert( finishResult.ok == 1 , "finishResult failed!" );
assert.eq( rdb.things.count() , 1 , "right has wrong size after move" );
diff --git a/jstests/sharding/shard1.js b/jstests/sharding/shard1.js
index 9349d209bff..faf0654a0a0 100644
--- a/jstests/sharding/shard1.js
+++ b/jstests/sharding/shard1.js
@@ -21,8 +21,8 @@ s.adminCommand( shardCommand );
dbconfig = s.config.databases.findOne( { name : "test" } );
assert.eq( dbconfig.sharded["test.foo"] , { num : 1 } , "Sharded content" );
-assert.eq( 1 , s.config.shard.count() );
-si = s.config.shard.findOne();
+assert.eq( 1 , s.config.chunks.count() );
+si = s.config.chunks.findOne();
assert( si );
assert.eq( si.ns , "test.foo" );
diff --git a/jstests/sharding/shard2.js b/jstests/sharding/shard2.js
index fb2e09bb67f..4b6bf895f4d 100644
--- a/jstests/sharding/shard2.js
+++ b/jstests/sharding/shard2.js
@@ -14,12 +14,12 @@ db = s.getDB( "test" );
s.adminCommand( { enablesharding : "test" } );
s.adminCommand( { shardcollection : "test.foo" , key : { num : 1 } } );
-assert.eq( 1 , s.config.shard.count() , "sanity check 1" );
+assert.eq( 1 , s.config.chunks.count() , "sanity check 1" );
s.adminCommand( { split : "test.foo" , middle : { num : 0 } } );
-assert.eq( 2 , s.config.shard.count() , "should be 2 shards" );
-shards = s.config.shard.find().toArray();
-assert.eq( shards[0].server , shards[1].server , "server should be the same after a split" );
+assert.eq( 2 , s.config.chunks.count() , "should be 2 shards" );
+chunks = s.config.chunks.find().toArray();
+assert.eq( chunks[0].shard , chunks[1].shard , "server should be the same after a split" );
db.foo.save( { num : 1 , name : "eliot" } );
@@ -43,16 +43,16 @@ placeCheck( 2 );
// NOTE: at this point we have 2 shard on 1 server
// test move shard
-assert.throws( function(){ s.adminCommand( { moveshard : "test.foo" , find : { num : 1 } , to : primary.getMongo().name } ); } );
-assert.throws( function(){ s.adminCommand( { moveshard : "test.foo" , find : { num : 1 } , to : "adasd" } ) } );
+assert.throws( function(){ s.adminCommand( { movechunk : "test.foo" , find : { num : 1 } , to : primary.getMongo().name } ); } );
+assert.throws( function(){ s.adminCommand( { movechunk : "test.foo" , find : { num : 1 } , to : "adasd" } ) } );
-s.adminCommand( { moveshard : "test.foo" , find : { num : 1 } , to : seconday.getMongo().name } );
+s.adminCommand( { movechunk : "test.foo" , find : { num : 1 } , to : seconday.getMongo().name } );
assert.eq( 2 , seconday.foo.find().length() , "seconday should have 2 after move shard" );
assert.eq( 1 , primary.foo.find().length() , "primary should only have 1 after move shard" );
-assert.eq( 2 , s.config.shard.count() , "still should have 2 shards after move not:" + s.getShardString() );
-shards = s.config.shard.find().toArray();
-assert.neq( shards[0].server , shards[1].server , "servers should NOT be the same after the move" );
+assert.eq( 2 , s.config.chunks.count() , "still should have 2 shards after move not:" + s.getShardString() );
+chunks = s.config.chunks.find().toArray();
+assert.neq( chunks[0].shard , chunks[1].shard , "servers should NOT be the same after the move" );
placeCheck( 3 );
diff --git a/jstests/sharding/shard3.js b/jstests/sharding/shard3.js
index 7683d674f19..15c314cb50c 100644
--- a/jstests/sharding/shard3.js
+++ b/jstests/sharding/shard3.js
@@ -15,7 +15,7 @@ assert.eq( 3 , s.getDB( "test" ).foo.find().toArray().length , "normal A" );
assert.eq( 3 , s2.getDB( "test" ).foo.find().toArray().length , "other A" );
s.adminCommand( { split : "test.foo" , middle : { num : 2 } } );
-s.adminCommand( { moveshard : "test.foo" , find : { num : 3 } , to : s.getOther( s.getServer( "test" ) ).name } );
+s.adminCommand( { movechunk : "test.foo" , find : { num : 3 } , to : s.getOther( s.getServer( "test" ) ).name } );
assert( s._connections[0].getDB( "test" ).foo.find().toArray().length > 0 , "blah 1" );
assert( s._connections[1].getDB( "test" ).foo.find().toArray().length > 0 , "blah 2" );
diff --git a/jstests/sharding/shard4.js b/jstests/sharding/shard4.js
index 71e5728b24c..9c3a59783b7 100644
--- a/jstests/sharding/shard4.js
+++ b/jstests/sharding/shard4.js
@@ -19,7 +19,7 @@ assert.eq( 7 , s.getDB( "test" ).foo.find().toArray().length , "normal A" );
assert.eq( 7 , s2.getDB( "test" ).foo.find().toArray().length , "other A" );
s.adminCommand( { split : "test.foo" , middle : { num : 4 } } );
-s.adminCommand( { moveshard : "test.foo" , find : { num : 3 } , to : s.getOther( s.getServer( "test" ) ).name } );
+s.adminCommand( { movechunk : "test.foo" , find : { num : 3 } , to : s.getOther( s.getServer( "test" ) ).name } );
assert( s._connections[0].getDB( "test" ).foo.find().toArray().length > 0 , "blah 1" );
assert( s._connections[1].getDB( "test" ).foo.find().toArray().length > 0 , "blah 2" );
@@ -30,7 +30,7 @@ assert.eq( 7 , s.getDB( "test" ).foo.find().toArray().length , "normal B" );
assert.eq( 7 , s2.getDB( "test" ).foo.find().toArray().length , "other B" );
s.adminCommand( { split : "test.foo" , middle : { num : 2 } } );
-//s.adminCommand( { moveshard : "test.foo" , find : { num : 3 } , to : s.getOther( s.getServer( "test" ) ).name } );
+//s.adminCommand( { movechunk : "test.foo" , find : { num : 3 } , to : s.getOther( s.getServer( "test" ) ).name } );
s.printShards();
print( "* A" );
diff --git a/jstests/sharding/shard5.js b/jstests/sharding/shard5.js
index ebedf5fd592..3652639d73a 100644
--- a/jstests/sharding/shard5.js
+++ b/jstests/sharding/shard5.js
@@ -21,7 +21,7 @@ assert.eq( 7 , s.getDB( "test" ).foo.find().toArray().length , "normal A" );
assert.eq( 7 , s2.getDB( "test" ).foo.find().toArray().length , "other A" );
s.adminCommand( { split : "test.foo" , middle : { num : 4 } } );
-s.adminCommand( { moveshard : "test.foo" , find : { num : 3 } , to : s.getOther( s.getServer( "test" ) ).name } );
+s.adminCommand( { movechunk : "test.foo" , find : { num : 3 } , to : s.getOther( s.getServer( "test" ) ).name } );
assert( s._connections[0].getDB( "test" ).foo.find().toArray().length > 0 , "blah 1" );
assert( s._connections[1].getDB( "test" ).foo.find().toArray().length > 0 , "blah 2" );
@@ -32,7 +32,7 @@ assert.eq( 7 , s.getDB( "test" ).foo.find().toArray().length , "normal B" );
assert.eq( 7 , s2.getDB( "test" ).foo.find().toArray().length , "other B" );
s.adminCommand( { split : "test.foo" , middle : { num : 2 } } );
-//s.adminCommand( { moveshard : "test.foo" , find : { num : 3 } , to : s.getOther( s.getServer( "test" ) ).name } );
+//s.adminCommand( { movechunk : "test.foo" , find : { num : 3 } , to : s.getOther( s.getServer( "test" ) ).name } );
s.printShards()
print( "* A" );
diff --git a/jstests/sharding/splitpick.js b/jstests/sharding/splitpick.js
index 027fae7ef2e..c4015c5cbba 100644
--- a/jstests/sharding/splitpick.js
+++ b/jstests/sharding/splitpick.js
@@ -25,7 +25,7 @@ s.adminCommand( { split : "test.foo" , find : { a : 1 } } );
assert.eq( s.admin.runCommand( { splitvalue : "test.foo" , find : { a : 3 } } ).middle.a , 99 , "splitvalue 3" );
s.adminCommand( { split : "test.foo" , find : { a : 99 } } );
-assert.eq( s.config.shard.count() , 3 );
+assert.eq( s.config.chunks.count() , 3 );
s.printShards();
assert.eq( s.admin.runCommand( { splitvalue : "test.foo" , find : { a : 50 } } ).middle.a , 10 , "splitvalue 4 " );
diff --git a/s/chunk.cpp b/s/chunk.cpp
index cda23045062..131fcf326f8 100644
--- a/s/chunk.cpp
+++ b/s/chunk.cpp
@@ -35,8 +35,8 @@ namespace mongo {
_dataWritten = 0;
}
- void Chunk::setServer( string s ){
- _server = s;
+ void Chunk::setShard( string s ){
+ _shard = s;
_markModified();
}
@@ -57,7 +57,7 @@ namespace mongo {
}
if ( sort ){
- ScopedDbConnection conn( getServer() );
+ ScopedDbConnection conn( getShard() );
Query q;
if ( sort == 1 )
q.sort( _manager->getShardKey().key() );
@@ -81,7 +81,7 @@ namespace mongo {
return _manager->getShardKey().extractKey( end );
}
- ScopedDbConnection conn( getServer() );
+ ScopedDbConnection conn( getShard() );
BSONObj result;
uassert( "medianKey failed!" , conn->runCommand( "admin" , BSON( "medianKey" << _ns
<< "keyPattern" << _manager->getShardKey().key()
@@ -103,11 +103,11 @@ namespace mongo {
log(1) << " before split on: " << m << "\n"
<< "\t self : " << toString() << endl;
- uassert( "locking namespace on server failed" , lockNamespaceOnServer( getServer() , _ns ) );
+ uassert( "locking namespace on server failed" , lockNamespaceOnServer( getShard() , _ns ) );
Chunk * s = new Chunk( _manager );
s->_ns = _ns;
- s->_server = _server;
+ s->_shard = _shard;
s->_min = m.getOwned();
s->_max = _max;
@@ -129,11 +129,11 @@ namespace mongo {
}
bool Chunk::moveAndCommit( const string& to , string& errmsg ){
- uassert( "can't move shard to its current location!" , to != getServer() );
+ uassert( "can't move shard to its current location!" , to != getShard() );
- log() << "moving shard ns: " << _ns << " moving shard: " << toString() << " " << _server << " -> " << to << endl;
+ log() << "moving chunk ns: " << _ns << " moving chunk: " << toString() << " " << _shard << " -> " << to << endl;
- string from = _server;
+ string from = _shard;
ShardChunkVersion oldVersion = _manager->getVersion( from );
BSONObj filter;
@@ -147,7 +147,7 @@ namespace mongo {
BSONObj startRes;
bool worked = fromconn->runCommand( "admin" ,
- BSON( "moveshard.start" << _ns <<
+ BSON( "movechunk.start" << _ns <<
"from" << from <<
"to" << to <<
"filter" << filter
@@ -156,12 +156,12 @@ namespace mongo {
);
if ( ! worked ){
- errmsg = (string)"moveshard.start failed: " + startRes.toString();
+ errmsg = (string)"movechunk.start failed: " + startRes.toString();
return false;
}
// update config db
- setServer( to );
+ setShard( to );
// need to increment version # for old server
Chunk * randomChunkOnOldServer = _manager->findChunkOnServer( from );
@@ -177,7 +177,7 @@ namespace mongo {
uassert( "version has to be higher" , newVersion > oldVersion );
BSONObjBuilder b;
- b << "moveshard.finish" << _ns;
+ b << "movechunk.finish" << _ns;
b << "to" << to;
b.appendTimestamp( "newVersion" , newVersion );
b.append( startRes["finishToken"] );
@@ -188,7 +188,7 @@ namespace mongo {
}
if ( ! worked ){
- errmsg = (string)"moveshard.finish failed: " + finishRes.toString();
+ errmsg = (string)"movechunk.finish failed: " + finishRes.toString();
return false;
}
@@ -237,14 +237,14 @@ namespace mongo {
if ( ! toMove )
return false;
- string newLocation = grid.pickServerForNewDB();
- if ( newLocation == getServer() ){
+ string newLocation = grid.pickShardForNewDB();
+ if ( newLocation == getShard() ){
// if this is the best server, then we shouldn't do anything!
- log(1) << "not moving shard: " << toString() << " b/c would move to same place " << newLocation << " -> " << getServer() << endl;
+ log(1) << "not moving chunk: " << toString() << " b/c would move to same place " << newLocation << " -> " << getShard() << endl;
return 0;
}
- log() << "moving shard (auto): " << toMove->toString() << " to: " << newLocation << " #objcets: " << toMove->countObjects() << endl;
+ log() << "moving chunk (auto): " << toMove->toString() << " to: " << newLocation << " #objcets: " << toMove->countObjects() << endl;
string errmsg;
massert( (string)"moveAndCommit failed: " + errmsg ,
@@ -254,7 +254,7 @@ namespace mongo {
}
long Chunk::getPhysicalSize(){
- ScopedDbConnection conn( getServer() );
+ ScopedDbConnection conn( getShard() );
BSONObj result;
uassert( "datasize failed!" , conn->runCommand( "admin" , BSON( "datasize" << _ns
@@ -269,7 +269,7 @@ namespace mongo {
long Chunk::countObjects(){
- ScopedDbConnection conn( getServer() );
+ ScopedDbConnection conn( getShard() );
BSONObj result;
@@ -299,14 +299,14 @@ namespace mongo {
to << "ns" << _ns;
to << "min" << _min;
to << "max" << _max;
- to << "server" << _server;
+ to << "shard" << _shard;
}
void Chunk::unserialize(const BSONObj& from){
_ns = from.getStringField( "ns" );
_min = from.getObjectField( "min" ).getOwned();
_max = from.getObjectField( "max" ).getOwned();
- _server = from.getStringField( "server" );
+ _shard = from.getStringField( "shard" );
_lastmod = from.hasField( "lastmod" ) ? from["lastmod"].date() : 0;
uassert( "Chunk needs a ns" , ! _ns.empty() );
@@ -347,14 +347,14 @@ namespace mongo {
}
void Chunk::ensureIndex(){
- ScopedDbConnection conn( getServer() );
+ ScopedDbConnection conn( getShard() );
conn->ensureIndex( _ns , _manager->getShardKey().key() );
conn.done();
}
string Chunk::toString() const {
stringstream ss;
- ss << "shard ns:" << _ns << " server: " << _server << " min: " << _min << " max: " << _max;
+ ss << "shard ns:" << _ns << " shard: " << _shard << " min: " << _min << " max: " << _max;
return ss.str();
}
@@ -386,7 +386,7 @@ namespace mongo {
c->_ns = ns;
c->_min = _key.globalMin();
c->_max = _key.globalMax();
- c->_server = config->getPrimary();
+ c->_shard = config->getPrimary();
c->_markModified();
_chunks.push_back( c );
@@ -424,7 +424,7 @@ namespace mongo {
for ( vector<Chunk*>::const_iterator i=_chunks.begin(); i!=_chunks.end(); i++ ){
Chunk * c = *i;
- if ( c->getServer() == server )
+ if ( c->getShard() == server )
return c;
}
@@ -449,9 +449,9 @@ namespace mongo {
for ( vector<Chunk*>::const_iterator i=_chunks.begin(); i!=_chunks.end(); i++ ){
Chunk * c = *i;
- if ( seen.count( c->getServer() ) )
+ if ( seen.count( c->getShard() ) )
continue;
- seen.insert( c->getServer() );
+ seen.insert( c->getShard() );
c->ensureIndex();
}
}
@@ -479,7 +479,7 @@ namespace mongo {
for ( vector<Chunk*>::const_iterator i=_chunks.begin(); i!=_chunks.end(); i++ ){
Chunk* c = *i;
- if ( c->getServer() != server )
+ if ( c->getShard() != server )
continue;
if ( c->_lastmod > max )
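
Serialization now writes the shard field, and unserialize reads it back, so documents in config.chunks (the new model namespace declared in chunk.h below) round-trip as something like this sketch (all values are placeholders):

    // placeholder values; field names follow Chunk::serialize above
    {
        ns : "test.foo" ,
        min : { num : { $minKey : 1 } } ,
        max : { num : 0 } ,
        shard : "localhost:30000" ,
        lastmod : ...   // optional; see Chunk::unserialize
    }
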
diff --git a/s/chunk.h b/s/chunk.h
index fe3b66e5cf2..e5dae17b719 100644
--- a/s/chunk.h
+++ b/s/chunk.h
@@ -58,10 +58,10 @@ namespace mongo {
return _max;
}
- string getServer(){
- return _server;
+ string getShard(){
+ return _shard;
}
- void setServer( string server );
+ void setShard( string shard );
bool contains( const BSONObj& obj );
@@ -105,7 +105,7 @@ namespace mongo {
bool moveAndCommit( const string& to , string& errmsg );
- virtual const char * getNS(){ return "config.shard"; } // XXX
+ virtual const char * getNS(){ return "config.chunks"; }
virtual void serialize(BSONObjBuilder& to);
virtual void unserialize(const BSONObj& from);
virtual string modelServer();
@@ -128,7 +128,7 @@ namespace mongo {
string _ns;
BSONObj _min;
BSONObj _max;
- string _server;
+ string _shard;
ShardChunkVersion _lastmod;
bool _modified;
diff --git a/s/commands_admin.cpp b/s/commands_admin.cpp
index 325c7dc96d6..7bbdcb6c9e9 100644
--- a/s/commands_admin.cpp
+++ b/s/commands_admin.cpp
@@ -150,7 +150,7 @@ namespace mongo {
return false;
}
- if ( ! grid.knowAboutServer( to ) ){
+ if ( ! grid.knowAboutShard( to ) ){
errmsg = "that server isn't known to me";
return false;
}
@@ -194,9 +194,9 @@ namespace mongo {
}
} movePrimary;
- class PartitionCmd : public GridAdminCmd {
+ class EnableShardingCmd : public GridAdminCmd {
public:
- PartitionCmd() : GridAdminCmd( "enablesharding" ){}
+ EnableShardingCmd() : GridAdminCmd( "enablesharding" ){}
virtual void help( stringstream& help ) const {
help
<< "Enable sharding for a db. (Use 'shardcollection' command afterwards.)\n"
@@ -210,24 +210,24 @@ namespace mongo {
}
DBConfig * config = grid.getDBConfig( dbname );
- if ( config->isPartitioned() ){
+ if ( config->isShardingEnabled() ){
errmsg = "already enabled";
return false;
}
- config->turnOnPartitioning();
+ config->enableSharding();
config->save( true );
result << "ok" << 1;
return true;
}
- } partitionCmd;
+ } enableShardingCmd;
// ------------ collection level commands -------------
- class ShardCmd : public GridAdminCmd {
+ class ShardCollectionCmd : public GridAdminCmd {
public:
- ShardCmd() : GridAdminCmd( "shardcollection" ){}
+ ShardCollectionCmd() : GridAdminCmd( "shardcollection" ){}
virtual void help( stringstream& help ) const {
help
<< "Shard a collection. Sharding must already be enabled for the database.\n"
@@ -241,12 +241,12 @@ namespace mongo {
}
DBConfig * config = grid.getDBConfig( ns );
- if ( ! config->isPartitioned() ){
+ if ( ! config->isShardingEnabled() ){
errmsg = "sharding not enabled for db";
return false;
}
- if ( config->sharded( ns ) ){
+ if ( config->isSharded( ns ) ){
errmsg = "already sharded";
return false;
}
@@ -257,13 +257,13 @@ namespace mongo {
return false;
}
- config->turnOnSharding( ns , key );
+ config->shardCollection( ns , key );
config->save( true );
result << "ok" << 1;
return true;
}
- } shardCmd;
+ } shardCollectionCmd;
class SplitCollectionHelper : public GridAdminCmd {
@@ -287,7 +287,7 @@ namespace mongo {
}
DBConfig * config = grid.getDBConfig( ns );
- if ( ! config->sharded( ns ) ){
+ if ( ! config->isSharded( ns ) ){
errmsg = "ns not sharded. have to shard before can split";
return false;
}
@@ -356,22 +356,22 @@ namespace mongo {
} splitCollectionCmd;
- class MoveShard : public GridAdminCmd {
+ class MoveChunkCmd : public GridAdminCmd {
public:
- MoveShard() : GridAdminCmd( "moveshard" ){}
+ MoveChunkCmd() : GridAdminCmd( "movechunk" ){}
virtual void help( stringstream& help ) const {
- help << "{ moveshard : 'test.foo' , find : { num : 1 } , to : 'localhost:30001' }";
+ help << "{ movechunk : 'test.foo' , find : { num : 1 } , to : 'localhost:30001' }";
}
bool run(const char *cmdns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
- string ns = cmdObj["moveshard"].valuestrsafe();
+ string ns = cmdObj["movechunk"].valuestrsafe();
if ( ns.size() == 0 ){
errmsg = "no ns";
return false;
}
DBConfig * config = grid.getDBConfig( ns );
- if ( ! config->sharded( ns ) ){
- errmsg = "ns not sharded. have to split before can move a shard";
+ if ( ! config->isSharded( ns ) ){
+ errmsg = "ns not sharded. have to shard before can move a chunk";
return false;
}
@@ -383,37 +383,37 @@ namespace mongo {
string to = cmdObj["to"].valuestrsafe();
if ( ! to.size() ){
- errmsg = "you have to specify where you want to move it";
+ errmsg = "you have to specify where you want to move the chunk";
return false;
}
ChunkManager * info = config->getChunkManager( ns );
- Chunk& s = info->findChunk( find );
- string from = s.getServer();
+ Chunk& c = info->findChunk( find );
+ string from = c.getShard();
- if ( s.getServer() == to ){
- errmsg = "that shard is already on that server";
+ if ( from == to ){
+ errmsg = "that chunk is already on that shard";
return false;
}
- if ( ! grid.knowAboutServer( to ) ){
- errmsg = "that server isn't known to me";
+ if ( ! grid.knowAboutShard( to ) ){
+ errmsg = "that shard isn't known to me";
return false;
}
- if ( ! s.moveAndCommit( to , errmsg ) )
+ if ( ! c.moveAndCommit( to , errmsg ) )
return false;
result << "ok" << 1;
return true;
}
- } moveShardCmd;
+ } moveChunkCmd;
// ------------ server level commands -------------
- class ListServers : public GridAdminCmd {
+ class ListShardsCmd : public GridAdminCmd {
public:
- ListServers() : GridAdminCmd("listservers") { }
+ ListShardsCmd() : GridAdminCmd("listshards") { }
bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
ScopedDbConnection conn( configServer.getPrimary() );
@@ -424,13 +424,13 @@ namespace mongo {
all.push_back( o );
}
- result.append("servers" , all );
+ result.append("shards" , all );
result.append("ok" , 1 );
conn.done();
return true;
}
- } listServers;
+ } listShardsCmd;
/* a shard is a single mongod server or a replica pair. add it (them) to the cluster as a storage partition. */
class AddShard : public GridAdminCmd {
@@ -439,9 +439,9 @@ namespace mongo {
bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
ScopedDbConnection conn( configServer.getPrimary() );
- BSONObj server = BSON( "host" << cmdObj["addshard"].valuestrsafe() );
+ BSONObj shard = BSON( "host" << cmdObj["addshard"].valuestrsafe() );
- BSONObj old = conn->findOne( "config.shards" , server );
+ BSONObj old = conn->findOne( "config.shards" , shard );
if ( ! old.isEmpty() ){
result.append( "ok" , 0.0 );
result.append( "msg" , "already exists" );
@@ -449,27 +449,44 @@ namespace mongo {
return false;
}
- conn->insert( "config.shards" , server );
+ try {
+ ScopedDbConnection newShardConn( shard["host"].valuestrsafe() );
+ newShardConn->getLastError();
+ }
+ catch ( DBException& e ){
+ errmsg = "couldn't connect to new shard";
+ result.append( "host" , shard["host"].valuestrsafe() );
+ result.append( "exception" , e.what() );
+ result.append( "ok" , 0 );
+ return false;
+ }
+
+ conn->insert( "config.shards" , shard );
result.append( "ok", 1 );
- result.append( "added" , server["host"].valuestrsafe() );
+ result.append( "added" , shard["host"].valuestrsafe() );
conn.done();
return true;
}
} addServer;
- class RemoveServer : public GridAdminCmd {
+ class RemoveShardCmd : public GridAdminCmd {
public:
- RemoveServer() : GridAdminCmd("removeserver") { }
+ RemoveShardCmd() : GridAdminCmd("removeshard") { }
bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
+ if ( 1 ){
+ errmsg = "removeshard doesn't work";
+ return 0;
+ }
+
ScopedDbConnection conn( configServer.getPrimary() );
- BSONObj server = BSON( "host" << cmdObj["removeserver"].valuestrsafe() );
+ BSONObj server = BSON( "host" << cmdObj["removeshard"].valuestrsafe() );
conn->remove( "config.shards" , server );
conn.done();
return true;
}
- } removeServer;
+ } removeShardCmd;
// --------------- public commands ----------------
@@ -503,13 +520,13 @@ namespace mongo {
}
} ismaster;
- class CmdShardGetPrevError : public Command {
+ class CmdShardingGetPrevError : public Command {
public:
virtual bool requiresAuth() { return false; }
virtual bool slaveOk() {
return true;
}
- CmdShardGetPrevError() : Command("getpreverror") { }
+ CmdShardingGetPrevError() : Command("getpreverror") { }
virtual bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
errmsg += "getpreverror not supported on mongos";
result << "ok" << 0;
@@ -517,13 +534,13 @@ namespace mongo {
}
} cmdGetPrevError;
- class CmdShardGetLastError : public Command {
+ class CmdShardingGetLastError : public Command {
public:
virtual bool requiresAuth() { return false; }
virtual bool slaveOk() {
return true;
}
- CmdShardGetLastError() : Command("getplasterror") { }
+ CmdShardingGetLastError() : Command("getplasterror") { }
virtual bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
errmsg += "getlasterror not working yet";
result << "ok" << 0;
diff --git a/s/commands_public.cpp b/s/commands_public.cpp
index eb97d58b900..b32eae4ba72 100644
--- a/s/commands_public.cpp
+++ b/s/commands_public.cpp
@@ -58,7 +58,7 @@ namespace mongo {
DBConfig * conf = grid.getDBConfig( dbName , false );
- if ( ! conf || ! conf->isPartitioned() || ! conf->sharded( fullns ) ){
+ if ( ! conf || ! conf->isShardingEnabled() || ! conf->isSharded( fullns ) ){
ScopedDbConnection conn( conf->getPrimary() );
result.append( "n" , (double)conn->count( fullns , filter ) );
conn.done();
diff --git a/s/config.cpp b/s/config.cpp
index db8e4ffaae0..e99eb21c30b 100644
--- a/s/config.cpp
+++ b/s/config.cpp
@@ -35,33 +35,33 @@ namespace mongo {
return configServer.modelServer();
}
- bool DBConfig::sharded( const string& ns ){
- if ( ! _partitioned )
+ bool DBConfig::isSharded( const string& ns ){
+ if ( ! _shardingEnabled )
return false;
return _sharded.find( ns ) != _sharded.end();
}
- string DBConfig::getServer( const string& ns ){
- if ( sharded( ns ) )
+ string DBConfig::getShard( const string& ns ){
+ if ( isSharded( ns ) )
return "";
uassert( "no primary!" , _primary.size() );
return _primary;
}
- void DBConfig::turnOnPartitioning(){
- _partitioned = true;
+ void DBConfig::enableSharding(){
+ _shardingEnabled = true;
}
- ChunkManager* DBConfig::turnOnSharding( const string& ns , ShardKeyPattern fieldsAndOrder ){
- if ( ! _partitioned )
- throw UserException( "not partitioned" );
+ ChunkManager* DBConfig::shardCollection( const string& ns , ShardKeyPattern fieldsAndOrder ){
+ if ( ! _shardingEnabled )
+ throw UserException( "db doesn't have sharding enabled" );
ChunkManager * info = _shards[ns];
if ( info )
return info;
- if ( sharded( ns ) )
+ if ( isSharded( ns ) )
throw UserException( "already sharded" );
_sharded[ns] = fieldsAndOrder;
@@ -77,7 +77,7 @@ namespace mongo {
if ( m && ! reload )
return m;
- uassert( (string)"not sharded:" + ns , sharded( ns ) );
+ uassert( (string)"not sharded:" + ns , isSharded( ns ) );
if ( m && reload )
log() << "reloading shard info for: " << ns << endl;
m = new ChunkManager( this , ns , _sharded[ ns ] );
@@ -87,7 +87,7 @@ namespace mongo {
void DBConfig::serialize(BSONObjBuilder& to){
to.append("name", _name);
- to.appendBool("partitioned", _partitioned );
+ to.appendBool("partitioned", _shardingEnabled );
to.append("primary", _primary );
if ( _sharded.size() > 0 ){
@@ -101,7 +101,7 @@ namespace mongo {
void DBConfig::unserialize(const BSONObj& from){
_name = from.getStringField("name");
- _partitioned = from.getBoolField("partitioned");
+ _shardingEnabled = from.getBoolField("partitioned");
_primary = from.getStringField("primary");
_sharded.clear();
@@ -131,7 +131,7 @@ namespace mongo {
/* --- Grid --- */
- string Grid::pickServerForNewDB(){
+ string Grid::pickShardForNewDB(){
ScopedDbConnection conn( configServer.getPrimary() );
// TODO: this is temporary
@@ -150,11 +150,11 @@ namespace mongo {
return all[ rand() % all.size() ];
}
- bool Grid::knowAboutServer( string name ) const{
+ bool Grid::knowAboutShard( string name ) const{
ScopedDbConnection conn( configServer.getPrimary() );
- BSONObj server = conn->findOne( "config.shards" , BSON( "host" << name ) );
+ BSONObj shard = conn->findOne( "config.shards" , BSON( "host" << name ) );
conn.done();
- return ! server.isEmpty();
+ return ! shard.isEmpty();
}
DBConfig* Grid::getDBConfig( string database , bool create ){
@@ -178,7 +178,7 @@ namespace mongo {
if ( database == "admin" )
cc->_primary = configServer.getPrimary();
else
- cc->_primary = pickServerForNewDB();
+ cc->_primary = pickShardForNewDB();
if ( cc->_primary.size() ){
cc->save();
@@ -212,7 +212,7 @@ namespace mongo {
/* --- ConfigServer ---- */
ConfigServer::ConfigServer() {
- _partitioned = false;
+ _shardingEnabled = false;
_primary = "";
_name = "grid";
}
@@ -372,8 +372,8 @@ namespace mongo {
DBConfig c;
testInOut( c , b.obj() );
- assert( c.sharded( "abc.foo" ) );
- assert( ! c.sharded( "abc.food" ) );
+ assert( c.isSharded( "abc.foo" ) );
+ assert( ! c.isSharded( "abc.food" ) );
}
void run(){
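
On the config side the renames settle the two-step activation flow: DBConfig::enableSharding() backs the enablesharding command and DBConfig::shardCollection() backs shardcollection. Note the persisted field keeps its old name "partitioned", so existing config databases still parse. The usual sequence, as in the jstests above (namespace and shard key are examples):

    // namespace and key are examples taken from the tests above
    admin = db.getSisterDB( "admin" );
    admin.runCommand( { enablesharding : "test" } );
    admin.runCommand( { shardcollection : "test.foo" , key : { num : 1 } } );
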
diff --git a/s/config.h b/s/config.h
index d13856002f7..ffa386fe3e8 100644
--- a/s/config.h
+++ b/s/config.h
@@ -43,36 +43,36 @@ namespace mongo {
*/
class DBConfig : public Model {
public:
- DBConfig( string name = "" ) : _name( name ) , _primary("") , _partitioned(false){ }
+ DBConfig( string name = "" ) : _name( name ) , _primary("") , _shardingEnabled(false){ }
string getName(){ return _name; };
/**
* @return if anything in this db is partitioned or not
*/
- bool isPartitioned(){
- return _partitioned;
+ bool isShardingEnabled(){
+ return _shardingEnabled;
}
- void turnOnPartitioning();
- ChunkManager* turnOnSharding( const string& ns , ShardKeyPattern fieldsAndOrder );
+ void enableSharding();
+ ChunkManager* shardCollection( const string& ns , ShardKeyPattern fieldsAndOrder );
/**
* @return whether or not this partition is partitioned
*/
- bool sharded( const string& ns );
+ bool isSharded( const string& ns );
ChunkManager* getChunkManager( const string& ns , bool reload = false );
-
+
/**
- * @return the correct for machine for the ns
- * if the namespace is partitioned, will return an empty string
+ * @return the correct for shard for the ns
+ * if the namespace is sharded, will return an empty string
*/
- string getServer( const string& ns );
+ string getShard( const string& ns );
string getPrimary(){
if ( _primary.size() == 0 )
- throw UserException( (string)"no primary server configured for db: " + _name );
+ throw UserException( (string)"no primary shard configured for db: " + _name );
return _primary;
}
@@ -94,7 +94,7 @@ namespace mongo {
protected:
string _name; // e.g. "alleyinsider"
string _primary; // e.g. localhost , mongo.foo.com:9999
- bool _partitioned;
+ bool _shardingEnabled;
map<string,ShardKeyPattern> _sharded; // { "alleyinsider.blog.posts" : { ts : 1 } , ... ] - all ns that are sharded
map<string,ChunkManager*> _shards; // this will only have entries for things that have been looked at
@@ -110,9 +110,9 @@ namespace mongo {
*/
DBConfig * getDBConfig( string ns , bool create=true);
- string pickServerForNewDB();
+ string pickShardForNewDB();
- bool knowAboutServer( string name ) const;
+ bool knowAboutShard( string name ) const;
unsigned long long getNextOpTime() const;
private:
diff --git a/s/d_logic.cpp b/s/d_logic.cpp
index 75e42bf3b4f..15c65ad154b 100644
--- a/s/d_logic.cpp
+++ b/s/d_logic.cpp
@@ -243,7 +243,7 @@ namespace mongo {
class MoveShardStartCommand : public MongodShardCommand {
public:
- MoveShardStartCommand() : MongodShardCommand( "moveshard.start" ){}
+ MoveShardStartCommand() : MongodShardCommand( "movechunk.start" ){}
virtual void help( stringstream& help ) const {
help << "should not be calling this directly" << endl;
}
@@ -253,7 +253,7 @@ namespace mongo {
// at this point the caller locks me, and updates config db
// then finish calls finish, and then deletes data when cursors are done
- string ns = cmdObj["moveshard.start"].valuestrsafe();
+ string ns = cmdObj["movechunk.start"].valuestrsafe();
string to = cmdObj["to"].valuestrsafe();
string from = cmdObj["from"].valuestrsafe(); // my public address, a tad redundant, but safe
BSONObj filter = cmdObj.getObjectField( "filter" );
@@ -277,7 +277,7 @@ namespace mongo {
return false;
}
- log() << "got moveshard.start: " << cmdObj << endl;
+ log() << "got movechunk.start: " << cmdObj << endl;
BSONObj res;
@@ -296,7 +296,7 @@ namespace mongo {
conn.done();
}
- log() << " moveshard.start res: " << res << endl;
+ log() << " movechunk.start res: " << res << endl;
if ( ok ){
result.append( res["finishToken"] );
@@ -309,7 +309,7 @@ namespace mongo {
class MoveShardFinishCommand : public MongodShardCommand {
public:
- MoveShardFinishCommand() : MongodShardCommand( "moveshard.finish" ){}
+ MoveShardFinishCommand() : MongodShardCommand( "movechunk.finish" ){}
virtual void help( stringstream& help ) const {
help << "should not be calling this directly" << endl;
}
@@ -317,7 +317,7 @@ namespace mongo {
bool run(const char *cmdns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
// see MoveShardStartCommand::run
- string ns = cmdObj["moveshard.finish"].valuestrsafe();
+ string ns = cmdObj["movechunk.finish"].valuestrsafe();
if ( ns.size() == 0 ){
errmsg = "need ns as cmd value";
return false;
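
d_logic.cpp renames the donor-side halves of a migration to match: movechunk.start and movechunk.finish. The handshake, lifted from moveshard1.js above (hosts, namespace, and filter are placeholders; finishToken must come from the start reply):

    // placeholders throughout; see moveshard1.js above for the full test
    startResult = fromShard.getDB( "admin" ).runCommand( { "movechunk.start" : "foo.things" ,
                                                           from : "localhost:30000" ,
                                                           to : "localhost:30001" ,
                                                           filter : { a : { $gt : 2 } } } );

    fromShard.getDB( "admin" ).runCommand( { "movechunk.finish" : "foo.things" ,
                                             finishToken : startResult.finishToken ,
                                             to : "localhost:30001" ,
                                             newVersion : 1 } );
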
diff --git a/s/request.cpp b/s/request.cpp
index 08f5acdb14c..ade683abb63 100644
--- a/s/request.cpp
+++ b/s/request.cpp
@@ -41,7 +41,7 @@ namespace mongo {
void Request::reset( bool reload ){
_config = grid.getDBConfig( getns() );
- if ( _config->sharded( getns() ) ){
+ if ( _config->isSharded( getns() ) ){
_shardInfo = _config->getChunkManager( getns() , reload );
uassert( (string)"no shard info for: " + getns() , _shardInfo );
}
@@ -57,9 +57,9 @@ namespace mongo {
if ( _shardInfo ){
if ( _shardInfo->numChunks() > 1 )
throw UserException( "can't call singleServerName on a sharded collection" );
- return _shardInfo->findChunk( _shardInfo->getShardKey().globalMin() ).getServer();
+ return _shardInfo->findChunk( _shardInfo->getShardKey().globalMin() ).getShard();
}
- string s = _config->getServer( getns() );
+ string s = _config->getShard( getns() );
uassert( "can't call singleServerName on a sharded collection!" , s.size() > 0 );
return s;
}
@@ -74,7 +74,7 @@ namespace mongo {
Strategy * s = SINGLE;
_d.markSet();
- if ( getConfig()->isPartitioned() && op == dbQuery ){
+ if ( getConfig()->isShardingEnabled() && op == dbQuery ){
// there are a few things we need to check here
// 1. db.eval
// TODO: right now i'm just going to block all
diff --git a/s/server.cpp b/s/server.cpp
index fd5fb21161c..8a8177498f4 100644
--- a/s/server.cpp
+++ b/s/server.cpp
@@ -156,7 +156,7 @@ int main(int argc, char* argv[], char *envp[] ) {
return 1;
}
- log() << argv[0] << " v0.2.0- (alpha 2) starting (--help for usage)" << endl;
+ log() << argv[0] << " v0.2- (alpha 2) starting (--help for usage)" << endl;
printGitVersion();
printSysInfo();
diff --git a/s/strategy.cpp b/s/strategy.cpp
index 7f515056e06..9be53a4b5db 100644
--- a/s/strategy.cpp
+++ b/s/strategy.cpp
@@ -150,7 +150,7 @@ namespace mongo {
if ( ! conf )
return;
- if ( ! conf->sharded( ns ) )
+ if ( ! conf->isSharded( ns ) )
return;
diff --git a/s/strategy_shard.cpp b/s/strategy_shard.cpp
index 044b1c79c34..7096ad28830 100644
--- a/s/strategy_shard.cpp
+++ b/s/strategy_shard.cpp
@@ -30,8 +30,8 @@ namespace mongo {
set<ServerAndQuery> servers;
map<string,int> serverCounts;
for ( vector<Chunk*>::iterator i = shards.begin(); i != shards.end(); i++ ){
- servers.insert( (*i)->getServer() );
- int& num = serverCounts[(*i)->getServer()];
+ servers.insert( (*i)->getShard() );
+ int& num = serverCounts[(*i)->getShard()];
num++;
}
@@ -51,13 +51,13 @@ namespace mongo {
for ( vector<Chunk*>::iterator i = shards.begin(); i != shards.end(); i++ ){
Chunk * s = *i;
BSONObj extra = BSONObj();
- if ( serverCounts[s->getServer()] > 1 ){
+ if ( serverCounts[s->getShard()] > 1 ){
BSONObjBuilder b;
s->getFilter( b );
extra = b.obj();
cout << s->toString() << " -->> " << extra << endl;
}
- buckets.insert( ServerAndQuery( s->getServer() , extra , s->getMin() ) );
+ buckets.insert( ServerAndQuery( s->getShard() , extra , s->getMin() ) );
}
cursor = new SerialServerShardedCursor( buckets , q , shardKeyOrder );
}
@@ -108,8 +108,8 @@ namespace mongo {
}
Chunk& c = manager->findChunk( o );
- log(4) << " server:" << c.getServer() << " " << o << endl;
- insert( c.getServer() , r.getns() , o );
+ log(4) << " server:" << c.getShard() << " " << o << endl;
+ insert( c.getShard() , r.getns() , o );
c.splitIfShould( o.objsize() );
}
@@ -139,7 +139,7 @@ namespace mongo {
}
Chunk& c = manager->findChunk( toupdate );
- doWrite( dbUpdate , r , c.getServer() );
+ doWrite( dbUpdate , r , c.getShard() );
c.splitIfShould( d.msg().data->dataLen() );
}
@@ -154,7 +154,7 @@ namespace mongo {
if ( manager->hasShardKey( pattern ) ){
Chunk& c = manager->findChunk( pattern );
- doWrite( dbDelete , r , c.getServer() );
+ doWrite( dbDelete , r , c.getShard() );
return;
}
@@ -167,10 +167,10 @@ namespace mongo {
set<string> seen;
for ( vector<Chunk*>::iterator i=chunks.begin(); i!=chunks.end(); i++){
Chunk * c = *i;
- if ( seen.count( c->getServer() ) )
+ if ( seen.count( c->getShard() ) )
continue;
- seen.insert( c->getServer() );
- doWrite( dbDelete , r , c->getServer() );
+ seen.insert( c->getShard() );
+ doWrite( dbDelete , r , c->getShard() );
}
}
diff --git a/scripting/engine_spidermonkey.cpp b/scripting/engine_spidermonkey.cpp
index 878473fa164..80b01a5eb21 100644
--- a/scripting/engine_spidermonkey.cpp
+++ b/scripting/engine_spidermonkey.cpp
@@ -1126,7 +1126,7 @@ namespace mongo {
int count;
};
- static JSBool checkTimeout( JSContext *cx, JSScript *script ) {
+ static JSBool _checkTimeout( JSContext *cx ){
TimeoutSpec &spec = *(TimeoutSpec *)( JS_GetContextPrivate( cx ) );
if ( ++spec.count % 1000 != 0 )
return JS_TRUE;
@@ -1136,16 +1136,13 @@ namespace mongo {
}
JS_ReportError( cx, "Timeout exceeded" );
return JS_FALSE;
- }
-#ifdef SM181
-#warning JS_SetOperationCallback not supported yet
- void installCheckTimeout( int timeoutMs ) {
}
-
- void uninstallCheckTimeout( int timeoutMs ){
+ static JSBool checkTimeout( JSContext *cx, JSScript *script ){
+ return _checkTimeout( cx );
}
-#else
+
+
void installCheckTimeout( int timeoutMs ) {
if ( timeoutMs > 0 ) {
TimeoutSpec *spec = new TimeoutSpec;
@@ -1153,18 +1150,26 @@ namespace mongo {
spec->start = boost::posix_time::microsec_clock::local_time();
spec->count = 0;
JS_SetContextPrivate( _context, (void*)spec );
+#ifdef SM181
+ JS_SetOperationCallback( _context, _checkTimeout );
+#else
JS_SetBranchCallback( _context, checkTimeout );
+#endif
}
}
void uninstallCheckTimeout( int timeoutMs ) {
if ( timeoutMs > 0 ) {
+#ifdef SM181
+ JS_SetOperationCallback( _context , 0 );
+#else
JS_SetBranchCallback( _context, 0 );
+#endif
delete (TimeoutSpec *)JS_GetContextPrivate( _context );
JS_SetContextPrivate( _context, 0 );
}
}
-#endif
+
void precall(){
_error = "";
currentScope.reset( this );
diff --git a/shell/dbshell.cpp b/shell/dbshell.cpp
index cec7bfdac54..7d4266feabb 100644
--- a/shell/dbshell.cpp
+++ b/shell/dbshell.cpp
@@ -151,6 +151,12 @@ bool isBalanced( string code ){
for ( size_t i=0; i<code.size(); i++ ){
switch( code[i] ){
+ case '/':
+ if ( i+1 < code.size() && code[i+1] == '/' ){
+ while ( i<code.size() && code[i] != '\n' )
+ i++;
+ }
+ continue;
case '{': brackets++; break;
case '}': brackets--; break;
case '(': parens++; break;
@@ -179,6 +185,10 @@ public:
assert( isBalanced( "function(){\n}" ) );
assert( ! isBalanced( "function(){" ) );
assert( isBalanced( "x = \"{\";" ) );
+ assert( isBalanced( "// {" ) );
+ assert( ! isBalanced( "// \n {" ) );
+ assert( ! isBalanced( "\"//\" {" ) );
+
}
} balnaced_test;
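
The new '/' case teaches isBalanced() to skip the remainder of a line after //, so an unmatched brace inside a line comment no longer leaves the shell waiting for more input. The added asserts pin down the semantics:

    // restating the asserts above
    isBalanced( "// {" )        // true:  the brace is commented out
    isBalanced( "// \n {" )     // false: the newline ends the comment, so the brace is live
    isBalanced( "\"//\" {" )    // false: quoted slashes do not start a comment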