summaryrefslogtreecommitdiff
path: root/jstests
diff options
context:
space:
mode:
authorAaron <aaron@10gen.com>2010-08-02 14:26:36 -0700
committerAaron <aaron@10gen.com>2010-08-02 14:26:36 -0700
commit45f9479547d6ca0f263e1d266914a2d113723e65 (patch)
tree4c0357ccd4dec752d365bb5d1cf46a2773797d5f /jstests
parentead8bad5b2b2b95d6a63a40783bf3fdaa215ef65 (diff)
parentdbc11f0fdfd943fbf7bdb21a9040be799a9b4f8e (diff)
downloadmongo-45f9479547d6ca0f263e1d266914a2d113723e65.tar.gz
Merge branch 'master' of github.com:mongodb/mongo
Diffstat (limited to 'jstests')
-rw-r--r--jstests/replsets/replset2.js9
-rw-r--r--jstests/sharding/addshard1.js34
-rw-r--r--jstests/sharding/cursor1.js17
-rw-r--r--jstests/slowNightly/sharding_cursors1.js71
-rw-r--r--jstests/slowWeekly/conc_update.js46
5 files changed, 170 insertions, 7 deletions
diff --git a/jstests/replsets/replset2.js b/jstests/replsets/replset2.js
index a5f4c7f75f5..eaa35ee9b87 100644
--- a/jstests/replsets/replset2.js
+++ b/jstests/replsets/replset2.js
@@ -35,7 +35,7 @@ doTest = function( signal ) {
var failed = false;
var callGetLastError = function(w, timeout, db) {
- var result = master.getDB(db).runCommand({getlasterror: 1, w: w, wtimeout: timeout});
+ var result = master.getDB(db).getLastErrorObj( w , timeout );
printjson( result );
if(result['ok'] != 1) {
print("FAILURE");
@@ -51,10 +51,14 @@ doTest = function( signal ) {
master.getDB(testDB).foo.insert({n: 3});
callGetLastError(3, 10000, testDB);
+ print("**** TEMP 1a ****")
+
m1 = master.getDB(testDB).foo.findOne({n: 1});
printjson( m1 );
assert( m1['n'] == 1 , "Failed to save to master on multiple inserts");
+ print("**** TEMP 1b ****")
+
var s0 = slaves[0].getDB(testDB).foo.findOne({n: 1});
assert( s0['n'] == 1 , "Failed to replicate to slave 0 on multiple inserts");
@@ -72,6 +76,7 @@ doTest = function( signal ) {
printjson( m1 );
assert( m1['n'] == 1 , "Failed to save to master");
+
var s0 = slaves[0].getDB(testDB).foo.findOne({n: 1});
assert( s0['n'] == 1 , "Failed to replicate to slave 0");
@@ -103,4 +108,4 @@ doTest = function( signal ) {
replTest.stopSet( signal );
}
-// doTest( 15 );
+doTest( 15 );
diff --git a/jstests/sharding/addshard1.js b/jstests/sharding/addshard1.js
index 8f5026d8547..f28feedeb3e 100644
--- a/jstests/sharding/addshard1.js
+++ b/jstests/sharding/addshard1.js
@@ -5,10 +5,17 @@ assert.eq( 1, s.config.shards.count(), "initial server count wrong" );
// create a shard and add a database; if the database is not duplicated, the mongod should be
// accepted as a shard
conn1 = startMongodTest( 29000 );
+
db1 = conn1.getDB( "testDB" );
-db1.foo.save( {a:1} );
+numObjs = 0;
+for (i=0; i<3; i++){
+ db1.foo.save( { a : i } );
+ numObjs++;
+}
db1.getLastError()
-assert( s.admin.runCommand( { addshard: "localhost:29000" } ).ok, "did not accepted non-duplicated shard" );
+
+newShard = "myShard";
+assert( s.admin.runCommand( { addshard: "localhost:29000" , name: newShard } ).ok, "did not accepted non-duplicated shard" );
// a mongod with an existing database name should not be allowed to become a shard
conn2 = startMongodTest( 29001 );
@@ -18,15 +25,32 @@ db2.getLastError()
db3 = conn2.getDB( "testDB" );
db3.foo.save( {a:1} );
db3.getLastError()
+
s.config.databases.find().forEach( printjson )
-assert( ! s.admin.runCommand( { addshard: "localhost:29001" } ).ok, "accepted mongod with duplicate db" );
+rejectedShard = "rejectedShard";
+assert( ! s.admin.runCommand( { addshard: "localhost:29001" , name : rejectedShard } ).ok, "accepted mongod with duplicate db" );
// check that all collection that were local to the mongod's are accessible through the mongos
sdb1 = s.getDB( "testDB" );
-assert.eq( 1 , sdb1.foo.count() , "wrong count for database that existed before addshard" );
-sdb2 = s.getDB( "otherDBxx" );
+assert.eq( numObjs , sdb1.foo.count() , "wrong count for database that existed before addshard" );
+sdb2 = s.getDB( "otherDB" );
assert.eq( 0 , sdb2.foo.count() , "database of rejected shard appears through mongos" );
+// make sure we can move a DB from the original mongod to a previously existing shard
+assert.eq( s.normalize( s.config.databases.findOne( { _id : "testDB" } ).primary ), newShard , "DB primary is wrong" );
+origShard = s.getNonPrimaries( "testDB" )[0];
+s.adminCommand( { moveprimary : "testDB" , to : origShard } );
+assert.eq( s.normalize( s.config.databases.findOne( { _id : "testDB" } ).primary ), origShard , "DB primary didn't move" );
+assert.eq( numObjs , sdb1.foo.count() , "wrong count after moving datbase that existed before addshard" );
+
+// make sure we can shard the original collections
+sdb1.foo.ensureIndex( { a : 1 } ) // can't shard populated collection without an index
+s.adminCommand( { enablesharding : "testDB" } );
+s.adminCommand( { shardcollection : "testDB.foo" , key: { a : 1 } } );
+s.adminCommand( { split : "testDB.foo", middle: { a : Math.floor(numObjs/2) } } );
+assert.eq( 2 , s.config.chunks.count(), "wrong chunk number after splitting collection that existed before" );
+assert.eq( numObjs , sdb1.foo.count() , "wrong count after splitting collection that existed before" );
+
stopMongod( 29000 );
stopMongod( 29001 );
s.stop();
diff --git a/jstests/sharding/cursor1.js b/jstests/sharding/cursor1.js
index 6b3b3bba51e..41a5d68415c 100644
--- a/jstests/sharding/cursor1.js
+++ b/jstests/sharding/cursor1.js
@@ -42,4 +42,21 @@ assert.eq( numObjs , cursor1.itcount() , "c1" );
assert.eq( numObjs , cursor2.itcount() , "c2" );
assert.eq( numObjs , cursor3.itcount() , "c3" );
+// test timeout
+gc(); gc();
+cur = db.foo.find().batchSize( 2 )
+assert( cur.next() , "T1" )
+assert( cur.next() , "T2" );
+before = db.runCommand( { "cursorInfo" : 1 , "setTimeout" : 10000 } ) // 10 seconds
+printjson( before )
+assert.eq( 1 , before.totalOpen , "TX1" )
+sleep( 6000 )
+assert( cur.next() , "T3" )
+assert( cur.next() , "T4" );
+sleep( 22000 )
+assert.throws( function(){ cur.next(); } , "T5" )
+after = db.runCommand( { "cursorInfo" : 1 , "setTimeout" : 10000 } ) // 10 seconds
+gc(); gc()
+assert.eq( 0 , after.totalOpen , "TX2" )
+
s.stop()
diff --git a/jstests/slowNightly/sharding_cursors1.js b/jstests/slowNightly/sharding_cursors1.js
new file mode 100644
index 00000000000..307e8d7cc5d
--- /dev/null
+++ b/jstests/slowNightly/sharding_cursors1.js
@@ -0,0 +1,71 @@
+s = new ShardingTest( "cursors1" , 2 , 0 , 1 , { chunksize : 1 } )
+
+s.adminCommand( { enablesharding : "test" } );
+
+s.config.settings.find().forEach( printjson )
+
+db = s.getDB( "test" );
+
+bigString = "x"
+while (bigString.length < 1024)
+ bigString += bigString;
+assert.eq(bigString.length, 1024, 'len');
+
+s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
+
+toInsert = ( 1 * 1000 * 1000 );
+for (var i=0; i < toInsert; i++ ){
+ db.foo.insert( { i: i, r: Math.random(), s: bigString } );
+ assert.eq(db.getLastError(), null, 'no error'); //SERVER-1541
+}
+
+inserted = toInsert;
+for (var i=0; i < 10; i++ ){
+ //assert.gte(db.foo.count(), toInsert, 'inserted enough'); //sometimes fails
+ assert.gte(db.foo.count(), toInsert - 100, 'inserted enough');
+ inserted = Math.min(inserted, db.foo.count())
+ sleep (100);
+}
+
+print("\n\n\n **** inserted: " + inserted + '\n\n\n');
+
+/*
+
+var line = 0;
+try {
+ assert.gte(db.foo.find({}, {_id:1}).itcount(), inserted, 'itcount check - no sort - _id only');
+ line = 1;
+ assert.gte(db.foo.find({}, {_id:1}).sort({_id:1}).itcount(), inserted, 'itcount check - _id sort - _id only');
+ line = 2;
+
+ db.foo.ensureIndex({i:1});
+ db.foo.ensureIndex({r:1});
+ db.getLastError();
+ line = 3;
+
+ assert.gte(db.foo.find({}, {i:1}).sort({i:1}).itcount(), inserted, 'itcount check - i sort - i only');
+ line = 4;
+ assert.gte(db.foo.find({}, {_id:1}).sort({i:1}).itcount(), inserted, 'itcount check - i sort - _id only');
+ line = 5;
+
+ assert.gte(db.foo.find({}, {r:1}).sort({r:1}).itcount(), inserted, 'itcount check - r sort - r only');
+ line = 6;
+ assert.gte(db.foo.find({}, {_id:1}).sort({r:1}).itcount(), inserted, 'itcount check - r sort - _id only');
+ line = 7;
+
+ assert.gte(db.foo.find().itcount(), inserted, 'itcount check - no sort - full');
+ line = 8;
+ assert.gte(db.foo.find().sort({_id:1}).itcount(), inserted, 'itcount check - _id sort - full');
+ line = 9;
+ assert.gte(db.foo.find().sort({i:1}).itcount(), inserted, 'itcount check - i sort - full');
+ line = 10;
+ assert.gte(db.foo.find().sort({r:1}).itcount(), inserted, 'itcount check - r sort - full');
+ line = 11;
+} catch (e) {
+ print("***** finished through line " + line + " before exception");
+ throw e;
+}
+
+*/
+
+s.stop();
diff --git a/jstests/slowWeekly/conc_update.js b/jstests/slowWeekly/conc_update.js
new file mode 100644
index 00000000000..299259f8224
--- /dev/null
+++ b/jstests/slowWeekly/conc_update.js
@@ -0,0 +1,46 @@
+db = db.getSisterDB("concurrency")
+db.dropDatabase();
+
+NRECORDS=5*1024*1024 // this needs to be relatively big so that
+ // the update() will take a while, but it could
+ // probably be smaller.
+
+print("loading "+NRECORDS+" documents (progress msg every 1024*1024 documents)")
+for (i=0; i<(NRECORDS); i++) {
+ db.conc.insert({x:i})
+ if ((i%(1024*1024))==0)
+ print("loaded " + i/(1024*1024) + " mibi-records")
+}
+
+print("making an index (this will take a while)")
+db.conc.ensureIndex({x:1})
+
+var c1=db.conc.count({x:{$lt:NRECORDS}})
+// this is just a flag that the child will toggle when it's done.
+db.concflag.update({}, {inprog:true}, true)
+
+updater=startParallelShell("db=db.getSisterDB('concurrency');\
+ db.conc.update({}, {$inc:{x: "+NRECORDS+"}}, false, true);\
+ print(db.getLastError());\
+ db.concflag.update({},{inprog:false})");
+
+querycount=0;
+decrements=0;
+misses=0
+while (1) {
+ if (db.concflag.findOne().inprog) {
+ c2=db.conc.count({x:{$lt:NRECORDS}})
+ print(c2)
+ querycount++;
+ if (c2<c1)
+ decrements++;
+ else
+ misses++;
+ c1 = c2;
+ } else
+ break;
+ sleep(10);
+}
+print(querycount + " queries, " + decrements + " decrements, " + misses + " misses");
+
+updater() // wait()