path: root/jstests/sharding/auth.js
author:    Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2015-10-06 15:09:29 -0400
committer: Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2015-10-07 17:33:09 -0400
commit:    488c2d58152ae68a7142b724a815794f46350076 (patch)
tree:      48fdcb3e35e7e04ee9092c886fcfef101a4f7927 /jstests/sharding/auth.js
parent:    16f0c718d035f6f3bbfd0db4751cf414f58ab5d1 (diff)
download:  mongo-488c2d58152ae68a7142b724a815794f46350076.tar.gz
SERVER-20787 Cleanup sharding auth test
This change makes auth.js faster by using a much smaller amount of data, and adds a separate test which exercises the auto-split/auto-rebalance logic.
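The speedup comes from pairing the smaller data set with a forced 1MB chunk size, so splits and migrations still happen quickly. A minimal sketch of that pattern, separate from the patch itself (the `st` handle and the two-shard topology are illustrative assumptions; the `config.settings` update mirrors the one in the diff below):

    // Force a 1MB chunk size (the default is 64MB) so even a small data set
    // produces enough chunks for the balancer to move between shards.
    var st = new ShardingTest({shards: 2, mongos: 1});  // hypothetical cluster
    assert.writeOK(st.getDB("config").settings.update(
        {_id: "chunksize"},      // cluster-wide chunk size document
        {$set: {value: 1}},      // value is in megabytes
        {upsert: true}));        // create the document if it doesn't exist yet
    st.stop();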
Diffstat (limited to 'jstests/sharding/auth.js')
-rw-r--r--  jstests/sharding/auth.js | 514
1 file changed, 259 insertions(+), 255 deletions(-)
diff --git a/jstests/sharding/auth.js b/jstests/sharding/auth.js
index 382dc9762e4..a9fbdf2da30 100644
--- a/jstests/sharding/auth.js
+++ b/jstests/sharding/auth.js
@@ -1,3 +1,5 @@
+// Tests that administrative sharding operations and map-reduce work or fail as expected when
+// key-based authentication is used
(function() {
'use strict';
@@ -20,7 +22,6 @@ var testUserReadOnly = {
password : "bat"
};
-
function login(userObj, thingToUse) {
if (!thingToUse) {
thingToUse = s;
@@ -43,295 +44,298 @@ function getShardName(rsTest) {
return config._id+"/"+members.join(",");
}
-function runTest(s) {
- print("adding user");
- s.getDB(adminUser.db).createUser({user: adminUser.username,
- pwd: adminUser.password,
- roles: jsTest.adminUserRoles});
-
- login(adminUser);
- assert.writeOK(s.getDB( "config" ).settings.update({ _id: "chunksize" },
- { $set: { value : 1 }}, true ));
- printjson(s.getDB("config").settings.find().toArray());
-
- print("restart mongos with different auth options");
-
- s.restartMongos(0, { port: s.port,
- v: 2,
- configdb: s._configDB,
- keyFile: "jstests/libs/key1",
- chunkSize: 1 });
- login(adminUser);
-
- var d1 = new ReplSetTest({name : "d1", nodes : 3, useHostName : true });
- d1.startSet({keyFile : "jstests/libs/key2", verbose : 0});
- d1.initiate();
-
- print("initiated");
- var shardName = authutil.asCluster(d1.nodes,
- "jstests/libs/key2",
- function() { return getShardName(d1); });
-
- print("adding shard w/out auth "+shardName);
- logout(adminUser);
-
- var result = s.getDB("admin").runCommand({addShard : shardName});
- printjson(result);
- assert.eq(result.code, 13);
-
- login(adminUser);
+var s = new ShardingTest({ name: "auth",
+ mongos: 1,
+ shards: 0,
+ verbose: 0,
+ other: {
+ extraOptions: { "keyFile": "jstests/libs/key1" },
+ noChunkSize: true, }
+ });
- print("adding shard w/wrong key "+shardName);
+if (s.getDB('admin').runCommand('buildInfo').bits < 64) {
+ print('Skipping test on 32-bit platforms');
+ return;
+}
- var thrown = false;
- try {
- result = s.adminCommand({addShard : shardName});
- }
- catch(e) {
- thrown = true;
- printjson(e);
- }
- assert(thrown);
+print("Configuration: Add user " + tojson(adminUser));
+s.getDB(adminUser.db).createUser({user: adminUser.username,
+ pwd: adminUser.password,
+ roles: jsTest.adminUserRoles});
+login(adminUser);
+
+// Set the chunk size and disable the secondary throttle (so the test doesn't run too slowly)
+assert.writeOK(s.getDB( "config" ).settings.update({ _id: "chunksize" },
+ { $set: { value : 1 } },
+ { upsert: true }));
+assert.writeOK(s.getDB( "config" ).settings.update(
+ { _id: "balancer" },
+ { $set: { "_secondaryThrottle" : false,
+ "_waitForDelete" : true } },
+ { upsert: true }));
+
+printjson(s.getDB("config").settings.find().toArray());
+
+print("Restart mongos with different auth options");
+s.restartMongos(0, { port: s.port,
+ v: 2,
+ configdb: s._configDB,
+ keyFile: "jstests/libs/key1",
+ chunkSize: 1 });
+login(adminUser);
+
+var d1 = new ReplSetTest({ name : "d1", nodes : 3, useHostName : true });
+d1.startSet({keyFile : "jstests/libs/key2", verbose : 0});
+d1.initiate();
+
+print("d1 initiated");
+var shardName = authutil.asCluster(d1.nodes,
+ "jstests/libs/key2",
+ function() { return getShardName(d1); });
+
+print("adding shard w/out auth "+shardName);
+logout(adminUser);
+
+var result = s.getDB("admin").runCommand({addShard : shardName});
+printjson(result);
+assert.eq(result.code, 13);
+
+login(adminUser);
+
+print("adding shard w/wrong key "+shardName);
+
+var thrown = false;
+try {
+ result = s.adminCommand({addShard : shardName});
+}
+catch(e) {
+ thrown = true;
+ printjson(e);
+}
+assert(thrown);
- print("start rs w/correct key");
+print("start rs w/correct key");
- d1.stopSet();
- d1.startSet({keyFile : "jstests/libs/key1", verbose : 0});
- d1.initiate();
+d1.stopSet();
+d1.startSet({keyFile : "jstests/libs/key1", verbose : 0});
+d1.initiate();
- var master = d1.getMaster();
+var master = d1.getMaster();
- print("adding shard w/auth " + shardName);
+print("adding shard w/auth " + shardName);
- result = s.getDB("admin").runCommand({addShard : shardName});
- assert.eq(result.ok, 1, tojson(result));
+result = s.getDB("admin").runCommand({addShard : shardName});
+assert.eq(result.ok, 1, tojson(result));
- s.getDB("admin").runCommand({enableSharding : "test"});
- s.getDB("admin").runCommand({shardCollection : "test.foo", key : {x : 1}});
+s.getDB("admin").runCommand({enableSharding : "test"});
+s.getDB("admin").runCommand({shardCollection : "test.foo", key : {x : 1}});
- d1.waitForState( d1.getSecondaries(), d1.SECONDARY, 5 * 60 * 1000 )
+d1.waitForState( d1.getSecondaries(), d1.SECONDARY, 5 * 60 * 1000 )
- s.getDB(testUser.db).createUser({user: testUser.username,
- pwd: testUser.password,
- roles: jsTest.basicUserRoles})
- s.getDB(testUserReadOnly.db).createUser({user: testUserReadOnly.username,
- pwd: testUserReadOnly.password,
- roles: jsTest.readOnlyUserRoles});
+s.getDB(testUser.db).createUser({user: testUser.username,
+ pwd: testUser.password,
+ roles: jsTest.basicUserRoles})
+s.getDB(testUserReadOnly.db).createUser({user: testUserReadOnly.username,
+ pwd: testUserReadOnly.password,
+ roles: jsTest.readOnlyUserRoles});
- logout(adminUser);
+logout(adminUser);
- print("query try");
- var e = assert.throws(function() {
- s.s.getDB("foo").bar.findOne();
- });
- printjson(e);
+print("query try");
+var e = assert.throws(function() {
+ s.s.getDB("foo").bar.findOne();
+});
+printjson(e);
- print("cmd try");
- assert.eq(0, s.s.getDB("foo").runCommand({listDatabases:1}).ok);
+print("cmd try");
+assert.eq(0, s.s.getDB("foo").runCommand({listDatabases:1}).ok);
- print("insert try 1");
- s.getDB("test").foo.insert({x:1});
+print("insert try 1");
+s.getDB("test").foo.insert({x:1});
- login(testUser);
- assert.eq(s.getDB("test").foo.findOne(), null);
+login(testUser);
+assert.eq(s.getDB("test").foo.findOne(), null);
- print("insert try 2");
- assert.writeOK(s.getDB("test").foo.insert({ x: 1 }));
- assert.eq( 1 , s.getDB( "test" ).foo.find().itcount() , tojson(result) );
+print("insert try 2");
+assert.writeOK(s.getDB("test").foo.insert({ x: 1 }));
+assert.eq( 1 , s.getDB( "test" ).foo.find().itcount() , tojson(result) );
- logout(testUser);
+logout(testUser);
- var d2 = new ReplSetTest({name : "d2", nodes : 3, useHostName : true });
- d2.startSet({keyFile : "jstests/libs/key1", verbose : 0});
- d2.initiate();
- d2.awaitSecondaryNodes();
+var d2 = new ReplSetTest({name : "d2", nodes : 3, useHostName : true });
+d2.startSet({keyFile : "jstests/libs/key1", verbose : 0});
+d2.initiate();
+d2.awaitSecondaryNodes();
- shardName = authutil.asCluster(d2.nodes, "jstests/libs/key1",
- function() { return getShardName(d2); });
+shardName = authutil.asCluster(d2.nodes, "jstests/libs/key1",
+ function() { return getShardName(d2); });
- print("adding shard "+shardName);
- login(adminUser);
- print("logged in");
- result = s.getDB("admin").runCommand({addShard : shardName})
+print("adding shard "+shardName);
+login(adminUser);
+print("logged in");
+result = s.getDB("admin").runCommand({addShard : shardName})
- ReplSetTest.awaitRSClientHosts(s.s, d1.nodes, {ok: true });
- ReplSetTest.awaitRSClientHosts(s.s, d2.nodes, {ok: true });
+ReplSetTest.awaitRSClientHosts(s.s, d1.nodes, {ok: true });
+ReplSetTest.awaitRSClientHosts(s.s, d2.nodes, {ok: true });
- s.getDB("test").foo.remove({})
+s.getDB("test").foo.remove({})
- var num = 100000;
- var bulk = s.getDB("test").foo.initializeUnorderedBulkOp();
- for (i=0; i<num; i++) {
- bulk.insert({ _id: i, x: i, abc: "defg", date: new Date(), str: "all the talk on the market" });
- }
- assert.writeOK(bulk.execute());
-
- s.startBalancer(60000);
-
- var d1Chunks;
- var d2Chunks;
- var totalChunks;
- assert.soon(function() {
- d1Chunks = s.getDB("config").chunks.count({shard : "d1"});
- d2Chunks = s.getDB("config").chunks.count({shard : "d2"});
- totalChunks = s.getDB("config").chunks.count({ns : "test.foo"});
-
- print("chunks: " + d1Chunks+" "+d2Chunks+" "+totalChunks);
-
- return d1Chunks > 0 && d2Chunks > 0 && d1Chunks+d2Chunks == totalChunks;
- }, "Chunks failed to balance: " + d1Chunks+" "+d2Chunks+" "+totalChunks,
- 60000,
- 5000);
-
- //SERVER-3645
- //assert.eq(s.getDB("test").foo.count(), num+1);
- var numDocs = s.getDB("test").foo.find().itcount()
- if (numDocs != num) {
- // Missing documents. At this point we're already in a failure mode, the code in this statement
- // is to get a better idea how/why it's failing.
-
- var numDocsSeen = 0;
- var lastDocNumber = -1;
- var missingDocNumbers = [];
- var docs = s.getDB("test").foo.find().sort({x:1}).toArray();
- for (var i = 0; i < docs.length; i++) {
- if (docs[i].x != lastDocNumber + 1) {
- for (var missing = lastDocNumber + 1; missing < docs[i].x; missing++) {
- missingDocNumbers.push(missing);
- }
+var num = 10000;
+var bulk = s.getDB("test").foo.initializeUnorderedBulkOp();
+for (i=0; i<num; i++) {
+ bulk.insert({ _id: i, x: i, abc: "defg", date: new Date(), str: "all the talk on the market" });
+}
+assert.writeOK(bulk.execute());
+
+s.startBalancer(60000);
+
+assert.soon(function() {
+ var d1Chunks = s.getDB("config").chunks.count({shard : "d1"});
+ var d2Chunks = s.getDB("config").chunks.count({shard : "d2"});
+ var totalChunks = s.getDB("config").chunks.count({ns : "test.foo"});
+
+ print("chunks: " + d1Chunks+" "+d2Chunks+" "+totalChunks);
+
+ return d1Chunks > 0 && d2Chunks > 0 && (d1Chunks + d2Chunks == totalChunks);
+ },
+ "Chunks failed to balance",
+ 60000,
+ 5000);
+
+//SERVER-3645
+//assert.eq(s.getDB("test").foo.count(), num+1);
+var numDocs = s.getDB("test").foo.find().itcount()
+if (numDocs != num) {
+    // Missing documents. At this point we're already in a failure mode; the code in this
+    // statement is there to get a better idea of how/why the test is failing.
+
+ var numDocsSeen = 0;
+ var lastDocNumber = -1;
+ var missingDocNumbers = [];
+ var docs = s.getDB("test").foo.find().sort({x:1}).toArray();
+ for (var i = 0; i < docs.length; i++) {
+ if (docs[i].x != lastDocNumber + 1) {
+ for (var missing = lastDocNumber + 1; missing < docs[i].x; missing++) {
+ missingDocNumbers.push(missing);
}
- lastDocNumber = docs[i].x;
- numDocsSeen++;
}
- assert.eq(numDocs, numDocsSeen, "More docs discovered on second find()")
- assert.eq(num - numDocs, missingDocNumbers.length);
-
- load('jstests/libs/trace_missing_docs.js');
-
- for ( var i = 0; i < missingDocNumbers.length; i++ ) {
- jsTest.log( "Tracing doc: " + missingDocNumbers[i] );
- traceMissingDoc( s.getDB( "test" ).foo, { _id : missingDocNumbers[i],
- x : missingDocNumbers[i] } );
- }
-
- assert(false, "Number of docs found does not equal the number inserted. Missing docs: " + missingDocNumbers);
+ lastDocNumber = docs[i].x;
+ numDocsSeen++;
}
+ assert.eq(numDocs, numDocsSeen, "More docs discovered on second find()")
+ assert.eq(num - numDocs, missingDocNumbers.length);
- // We're only sure we aren't duplicating documents iff there's no balancing going on here
- // This call also waits for any ongoing balancing to stop
- s.stopBalancer(60000);
+ load('jstests/libs/trace_missing_docs.js');
- var cursor = s.getDB("test").foo.find({x:{$lt : 500}});
-
- var count = 0;
- while (cursor.hasNext()) {
- cursor.next();
- count++;
+ for ( var i = 0; i < missingDocNumbers.length; i++ ) {
+ jsTest.log( "Tracing doc: " + missingDocNumbers[i] );
+ traceMissingDoc( s.getDB( "test" ).foo, { _id : missingDocNumbers[i],
+ x : missingDocNumbers[i] } );
}
- assert.eq(count, 500);
-
- logout(adminUser);
-
- d1.waitForState( d1.getSecondaries(), d1.SECONDARY, 5 * 60 * 1000 );
- d2.waitForState( d2.getSecondaries(), d2.SECONDARY, 5 * 60 * 1000 );
-
- authutil.asCluster(d1.nodes, "jstests/libs/key1", function() { d1.awaitReplication(120000); });
- authutil.asCluster(d2.nodes, "jstests/libs/key1", function() { d2.awaitReplication(120000); });
-
- // add admin on shard itself, hack to prevent localhost auth bypass
- d1.getMaster().getDB(adminUser.db).createUser({user: adminUser.username,
- pwd: adminUser.password,
- roles: jsTest.adminUserRoles},
- {w: 3, wtimeout: 60000});
- d2.getMaster().getDB(adminUser.db).createUser({user: adminUser.username,
- pwd: adminUser.password,
- roles: jsTest.adminUserRoles},
- {w: 3, wtimeout: 60000});
-
- login(testUser);
- print( "testing map reduce" );
-
- // Sharded map reduce can be tricky since all components talk to each other. For example
- // SERVER-4114 is triggered when 1 mongod connects to another for final reduce it's not
- // properly tested here since addresses are localhost, which is more permissive.
- var res = s.getDB("test").runCommand(
- {mapreduce : "foo",
- map : function() { emit(this.x, 1); },
- reduce : function(key, values) { return values.length; },
- out:"mrout"
- });
- printjson(res);
- assert.commandWorked(res);
-
- // Check that dump doesn't get stuck with auth
- var x = runMongoProgram("mongodump",
- "--host", s.s.host,
- "-d", testUser.db,
- "-u", testUser.username,
- "-p", testUser.password,
- "--authenticationMechanism", "SCRAM-SHA-1");
- print("result: " + x);
-
- // Test read only users
- print( "starting read only tests" );
-
- var readOnlyS = new Mongo( s.getDB( "test" ).getMongo().host )
- var readOnlyDB = readOnlyS.getDB( "test" );
-
- print( " testing find that should fail" );
- assert.throws( function(){ readOnlyDB.foo.findOne(); } )
-
- print( " logging in" );
- login( testUserReadOnly , readOnlyS );
-
- print( " testing find that should work" );
- readOnlyDB.foo.findOne();
-
- print( " testing write that should fail" );
- assert.writeError(readOnlyDB.foo.insert({ eliot: 1 }));
-
- print( " testing read command (should succeed)" );
- assert.commandWorked(readOnlyDB.runCommand({count : "foo"}));
-
- print("make sure currentOp/killOp fail");
- assert.commandFailed(readOnlyDB.currentOp());
- assert.commandFailed(readOnlyDB.killOp(123));
-
- // fsyncUnlock doesn't work in mongos anyway, so no need check authorization for it
- /*
- broken because of SERVER-4156
- print( " testing write command (should fail)" );
- assert.commandFailed(readOnlyDB.runCommand(
- {mapreduce : "foo",
- map : function() { emit(this.y, 1); },
- reduce : function(key, values) { return values.length; },
- out:"blarg"
- }));
- */
-
- print( " testing logout (should succeed)" );
- assert.commandWorked(readOnlyDB.runCommand({logout : 1}));
-
- print("make sure currentOp/killOp fail again");
- assert.commandFailed(readOnlyDB.currentOp());
- assert.commandFailed(readOnlyDB.killOp(123));
+ assert(false, "Number of docs found does not equal the number inserted. Missing docs: " + missingDocNumbers);
}
-var s = new ShardingTest("auth1", 0, 0, 1,
- {
- rs: true,
- extraOptions: { "keyFile": "jstests/libs/key1" },
- noChunkSize: true,
- });
+// We're only sure we aren't duplicating documents if there's no balancing going on.
+// This call also waits for any ongoing balancing to stop.
+s.stopBalancer(60000);
-if (s.getDB('admin').runCommand('buildInfo').bits < 64) {
- print('Skipping test on 32-bit platforms');
-}
-else {
- runTest(s);
+var cursor = s.getDB("test").foo.find({x:{$lt : 500}});
+
+var count = 0;
+while (cursor.hasNext()) {
+ cursor.next();
+ count++;
}
+assert.eq(count, 500);
+
+logout(adminUser);
+
+d1.waitForState( d1.getSecondaries(), d1.SECONDARY, 5 * 60 * 1000 );
+d2.waitForState( d2.getSecondaries(), d2.SECONDARY, 5 * 60 * 1000 );
+
+authutil.asCluster(d1.nodes, "jstests/libs/key1", function() { d1.awaitReplication(120000); });
+authutil.asCluster(d2.nodes, "jstests/libs/key1", function() { d2.awaitReplication(120000); });
+
+// Add the admin user on the shard itself, a hack to prevent the localhost auth bypass
+d1.getMaster().getDB(adminUser.db).createUser({user: adminUser.username,
+ pwd: adminUser.password,
+ roles: jsTest.adminUserRoles},
+ {w: 3, wtimeout: 60000});
+d2.getMaster().getDB(adminUser.db).createUser({user: adminUser.username,
+ pwd: adminUser.password,
+ roles: jsTest.adminUserRoles},
+ {w: 3, wtimeout: 60000});
+
+login(testUser);
+print( "testing map reduce" );
+
+// Sharded map-reduce can be tricky since all components talk to each other. For example,
+// SERVER-4114 is triggered when one mongod connects to another for the final reduce; it's not
+// properly tested here since the addresses are localhost, which is more permissive.
+var res = s.getDB("test").runCommand(
+ {mapreduce : "foo",
+ map : function() { emit(this.x, 1); },
+ reduce : function(key, values) { return values.length; },
+ out:"mrout"
+ });
+printjson(res);
+assert.commandWorked(res);
+
+// Check that dump doesn't get stuck with auth
+var x = runMongoProgram("mongodump",
+ "--host", s.s.host,
+ "-d", testUser.db,
+ "-u", testUser.username,
+ "-p", testUser.password,
+ "--authenticationMechanism", "SCRAM-SHA-1");
+print("result: " + x);
+
+// Test read only users
+print( "starting read only tests" );
+
+var readOnlyS = new Mongo( s.getDB( "test" ).getMongo().host )
+var readOnlyDB = readOnlyS.getDB( "test" );
+
+print( " testing find that should fail" );
+assert.throws( function(){ readOnlyDB.foo.findOne(); } )
+
+print( " logging in" );
+login( testUserReadOnly , readOnlyS );
+
+print( " testing find that should work" );
+readOnlyDB.foo.findOne();
+
+print( " testing write that should fail" );
+assert.writeError(readOnlyDB.foo.insert({ eliot: 1 }));
+
+print( " testing read command (should succeed)" );
+assert.commandWorked(readOnlyDB.runCommand({count : "foo"}));
+
+print("make sure currentOp/killOp fail");
+assert.commandFailed(readOnlyDB.currentOp());
+assert.commandFailed(readOnlyDB.killOp(123));
+
+// fsyncUnlock doesn't work in mongos anyway, so there's no need to check authorization for it
+/*
+broken because of SERVER-4156
+print( " testing write command (should fail)" );
+assert.commandFailed(readOnlyDB.runCommand(
+ {mapreduce : "foo",
+ map : function() { emit(this.y, 1); },
+ reduce : function(key, values) { return values.length; },
+ out:"blarg"
+ }));
+*/
+
+print( " testing logout (should succeed)" );
+assert.commandWorked(readOnlyDB.runCommand({logout : 1}));
+
+print("make sure currentOp/killOp fail again");
+assert.commandFailed(readOnlyDB.currentOp());
+assert.commandFailed(readOnlyDB.killOp(123));
+
s.stop();
})();
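As a usage note (an assumption about the local setup, not part of the commit): ShardingTest spins up its own mongod and mongos processes, so a jstest like this is typically launched through a mongo shell that starts without connecting to a database:

    mongo --nodb jstests/sharding/auth.js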