Diffstat (limited to 'jstests')
-rw-r--r--               jstests/auth/auth1.js                        8
-rw-r--r--               jstests/distinct3.js                        27
-rw-r--r--               jstests/queryoptimizer3.js                  33
-rw-r--r--               jstests/queryoptimizer6.js                  28
-rw-r--r--               jstests/replsets/majority.js                60
-rw-r--r--               jstests/sharding/inTiming.js                58
-rw-r--r--               jstests/sharding/sharding_with_keyfile.js   10
-rw-r--r-- [-rwxr-xr-x]  jstests/sharding/sharding_with_keyfile.key   0
-rw-r--r--               jstests/slowNightly/replReads.js            14
9 files changed, 222 insertions(+), 16 deletions(-)
diff --git a/jstests/auth/auth1.js b/jstests/auth/auth1.js
index c837085..8639202 100644
--- a/jstests/auth/auth1.js
+++ b/jstests/auth/auth1.js
@@ -16,6 +16,14 @@ db.addUser( "eliot" , "eliot" );
db.addUser( "guest" , "guest", true );
db.getSisterDB( "admin" ).addUser( "super", "super" );
+print("make sure we can't run certain commands w/out auth");
+var errmsg = "need to login";
+res = db.adminCommand({getLog : "global"});
+printjson( res );
+assert( ! res.log || res.log.length == 0 , "getLog should fail: " + tojson( res ) );
+assert.eq( res.errmsg , errmsg , tojson( res ) );
+
+
assert.throws( function() { t.findOne() }, [], "read without login" );
assert( db.auth( "eliot" , "eliot" ) , "auth failed" );
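For reference, the full round trip these new assertions gate on looks like this once the auth call succeeds (a minimal sketch mirroring the hunk above; it assumes the super user added earlier and a server running with auth):

    var admin = db.getSisterDB("admin");
    // Unauthenticated: the command must not leak log data.
    var res = admin.runCommand({getLog: "global"});
    assert(!res.log || res.log.length == 0, "getLog leaked data pre-auth");
    assert.eq("need to login", res.errmsg);
    // After authenticating as the admin user, the same command succeeds.
    assert(admin.auth("super", "super"), "auth failed");
    assert.eq(1, admin.runCommand({getLog: "global"}).ok);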
diff --git a/jstests/distinct3.js b/jstests/distinct3.js
new file mode 100644
index 0000000..f945ec9
--- /dev/null
+++ b/jstests/distinct3.js
@@ -0,0 +1,27 @@
+// Yield and delete test case for query optimizer cursor.
+
+t = db.jstests_distinct3;
+t.drop();
+
+t.ensureIndex({a:1});
+t.ensureIndex({b:1});
+
+for( i = 0; i < 50; ++i ) {
+ for( j = 0; j < 20; ++j ) {
+ t.save({a:i,c:i,d:j});
+ }
+}
+for( i = 0; i < 1000; ++i ) {
+ t.save({b:i,c:i+50});
+}
+db.getLastError();
+
+// The idea here is to try to remove the last match for the {a:1} index scan while distinct is yielding.
+p = startParallelShell( 'for( i = 0; i < 2500; ++i ) { db.jstests_distinct3.remove({a:49}); for( j = 0; j < 20; ++j ) { db.jstests_distinct3.save({a:49,c:49,d:j}) } }' );
+
+for( i = 0; i < 100; ++i ) {
+ count = t.distinct( 'c', {$or:[{a:{$gte:0},d:0},{b:{$gte:0}}]} ).length;
+ assert.gt( count, 1000 );
+}
+
+p();
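A note on the loose bound in that last assertion: the setup saves c values 0..49 on the a documents and 50..1049 on the b documents, so an unraced distinct('c', ...) sees 1050 values, and asserting only count > 1000 leaves headroom for matches the parallel remove hides while the cursor yields. Restated as arithmetic in the shell:

    // 50 distinct c values come from {a:i, c:i, d:j}, 1000 more from {b:i, c:i+50}.
    var maxDistinct = 50 + 1000;  // 1050 when no remove is racing
    var count = db.jstests_distinct3.distinct('c', {$or: [{a: {$gte: 0}, d: 0}, {b: {$gte: 0}}]}).length;
    assert.gt(count, 1000, "yielding cursor dropped results: " + count + " of " + maxDistinct);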
diff --git a/jstests/queryoptimizer3.js b/jstests/queryoptimizer3.js
new file mode 100644
index 0000000..76bc5b6
--- /dev/null
+++ b/jstests/queryoptimizer3.js
@@ -0,0 +1,33 @@
+// Check cases where index scans are aborted due to the collection being dropped.
+
+t = db.jstests_queryoptimizer3;
+t.drop();
+
+p = startParallelShell( 'for( i = 0; i < 400; ++i ) { sleep( 50 ); db.jstests_queryoptimizer3.drop(); }' );
+
+for( i = 0; i < 100; ++i ) {
+ t.drop();
+ t.ensureIndex({a:1});
+ t.ensureIndex({b:1});
+ for( j = 0; j < 100; ++j ) {
+ t.save({a:j,b:j});
+ }
+ m = i % 5;
+ if ( m == 0 ) {
+ t.count({a:{$gte:0},b:{$gte:0}});
+ }
+ else if ( m == 1 ) {
+ t.find({a:{$gte:0},b:{$gte:0}}).itcount();
+ }
+ else if ( m == 2 ) {
+ t.remove({a:{$gte:0},b:{$gte:0}});
+ }
+ else if ( m == 3 ) {
+ t.update({a:{$gte:0},b:{$gte:0}},{});
+ }
+ else if ( m == 4 ) {
+ t.distinct('x',{a:{$gte:0},b:{$gte:0}});
+ }
+}
+
+p();
diff --git a/jstests/queryoptimizer6.js b/jstests/queryoptimizer6.js
new file mode 100644
index 0000000..fce92d7
--- /dev/null
+++ b/jstests/queryoptimizer6.js
@@ -0,0 +1,28 @@
+// Test that $ne constraints are accounted for in QueryPattern. SERVER-4665
+
+t = db.jstests_queryoptimizer6;
+
+function reset() {
+ t.drop();
+ t.save( {a:1} );
+ t.ensureIndex( {b:1}, {sparse:true} );
+}
+
+reset();
+// The sparse index will be used, and recorded for this query pattern.
+assert.eq( 0, t.find( {a:1,b:{$ne:1}} ).itcount() );
+// The query pattern should be different, and the sparse index should not be used.
+assert.eq( 1, t.find( {a:1} ).itcount() );
+
+reset();
+// The sparse index will be used, and (for better or worse) recorded for this query pattern.
+assert.eq( 0, t.find( {a:1} ).min({b:1}).itcount() );
+// The sparse index should not be used, even though the query patterns match.
+assert.eq( 1, t.find( {a:1} ).itcount() );
+
+reset();
+t.ensureIndex( {a:1,b:1} );
+// The sparse index will be used, and (for better or worse) recorded for this query pattern.
+assert.eq( 0, t.find( {a:1,b:null} ).min({b:1}).itcount() );
+// Descriptive test - the recorded {b:1} index is used, because it is not useless.
+assert.eq( 0, t.find( {a:1,b:null} ).itcount() );
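The sparse-index caveat this test records: a document with no b field has no entry in a sparse {b:1} index, so any plan that scans that index can never return it, even when the predicate (such as b:{$ne:1}, which matches a missing b) semantically accepts the document. A standalone illustration with explicit hints (a sketch; the scratch collection name is made up):

    var c = db.jstests_sparse_demo;  // hypothetical scratch collection
    c.drop();
    c.save({a: 1});                  // no b field, so absent from the sparse index
    c.ensureIndex({b: 1}, {sparse: true});
    // A plan pinned to the sparse index never visits the document:
    assert.eq(0, c.find({a: 1, b: {$ne: 1}}).hint({b: 1}).itcount());
    // A collection scan applies the same predicate and finds it:
    assert.eq(1, c.find({a: 1, b: {$ne: 1}}).hint({$natural: 1}).itcount());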
diff --git a/jstests/replsets/majority.js b/jstests/replsets/majority.js
index 6df1a41..5bb3cde 100644
--- a/jstests/replsets/majority.js
+++ b/jstests/replsets/majority.js
@@ -1,4 +1,11 @@
-var num = 5;
+var testInsert = function() {
+ master.getDB("foo").bar.insert({x:1});
+ var result = master.getDB("foo").runCommand({getLastError:1, w:"majority", wtimeout:timeout});
+ printjson(result);
+ return result;
+};
+
+var num = 7;
var host = getHostName();
var name = "tags";
var timeout = 10000;
@@ -6,28 +13,57 @@ var timeout = 10000;
var replTest = new ReplSetTest( {name: name, nodes: num, startPort:31000} );
var nodes = replTest.startSet();
var port = replTest.ports;
-replTest.initiate({_id : name, members :
+var config = {_id : name, members :
[
{_id:0, host : host+":"+port[0], priority : 2},
- {_id:1, host : host+":"+port[1]},
+ {_id:1, host : host+":"+port[1], votes : 3},
{_id:2, host : host+":"+port[2]},
{_id:3, host : host+":"+port[3], arbiterOnly : true},
{_id:4, host : host+":"+port[4], arbiterOnly : true},
+ {_id:5, host : host+":"+port[5], arbiterOnly : true},
+ {_id:6, host : host+":"+port[6], arbiterOnly : true},
],
- });
+ };
+replTest.initiate(config);
replTest.awaitReplication();
-replTest.bridge();
-
-var testInsert = function() {
- master.getDB("foo").bar.insert({x:1});
- var result = master.getDB("foo").runCommand({getLastError:1, w:"majority", wtimeout:timeout});
- printjson(result);
- return result;
-};
var master = replTest.getMaster();
+print("try taking down 4 arbiters");
+replTest.stop(3);
+replTest.stop(4);
+
+replTest.stop(6);
+replTest.remove(6);
+replTest.stop(5);
+replTest.remove(5);
+
+print("should still be able to write to a majority");
+assert.eq(testInsert().err, null);
+
+print("start up some of the arbiters again");
+replTest.restart(3);
+replTest.restart(4);
+
+print("remove 2 of the arbiters");
+config.version = 2;
+config.members.pop();
+config.members.pop();
+
+try {
+ master.getDB("admin").runCommand({replSetReconfig : config});
+}
+catch (e) {
+ print("reconfig error: "+e);
+}
+
+replTest.awaitReplication();
+
+replTest.bridge();
+
+master = replTest.getMaster();
+
print("get back in the groove");
testInsert();
replTest.awaitReplication();
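The vote arithmetic behind "should still be able to write to a majority": the set has 7 members, but node 1 carries votes: 3, so there are 9 votes in total and a majority needs 5. Stopping all four arbiters leaves nodes 0, 1, and 2 holding 1 + 3 + 1 = 5 votes, which is what the test is counting on when it asserts the w:"majority" insert still succeeds. As a sketch:

    // Majority math for the 7-member config (nodes 3..6 are arbiters).
    var votes = [1, 3, 1, 1, 1, 1, 1];          // node 1 has votes: 3
    var total = 0;
    for (var i = 0; i < votes.length; i++) total += votes[i];   // 9
    var majority = Math.floor(total / 2) + 1;                   // 5
    var survivors = votes[0] + votes[1] + votes[2];             // 5
    assert(survivors >= majority, "nodes 0-2 alone should form a majority");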
diff --git a/jstests/sharding/inTiming.js b/jstests/sharding/inTiming.js
new file mode 100644
index 0000000..51387a6
--- /dev/null
+++ b/jstests/sharding/inTiming.js
@@ -0,0 +1,58 @@
+// Check that shard selection does not take a really long time on $in queries: SERVER-4745
+
+s = new ShardingTest( 'sharding_inqueries', 3, 0, 1, {chunksize:1});
+
+db = s.getDB( 'test' );
+
+s.adminCommand( { enablesharding: 'test' } );
+s.adminCommand( { shardcollection: 'test.foo', key: { a:1, b:1 } } );
+
+var lst = [];
+for (var i = 0; i < 500; i++) { lst.push(i); }
+
+/*
+ * Time how long it takes to do $in queries on a sharded and unsharded collection.
+ * There is no data in either collection, so the query time is coming almost
+ * entirely from the code that selects which shard(s) to send the query to.
+ */
+unshardedQuery = function() {db.bar.find({a:{$in:lst}, b:{$in:lst}}).itcount()};
+shardedQuery = function() {db.foo.find({a:{$in:lst}, b:{$in:lst}}).itcount()};
+// Run queries a few times to warm memory
+for (var i = 0; i < 3; i++) {
+ unshardedQuery();
+ shardedQuery();
+}
+
+unshardedTime = Date.timeFunc(unshardedQuery , 5);
+shardedTime = Date.timeFunc(shardedQuery, 5);
+
+print("Unsharded $in query ran in " + unshardedTime);
+print("Sharded $in query ran in " + shardedTime);
+assert(unshardedTime * 10 > shardedTime, "Sharded query is more than 10 times as slow as unsharded query");
+
+s.getDB('config').settings.update( { _id: "balancer" }, { $set : { stopped: true } } , true );
+
+db.adminCommand({split : "test.foo", middle : { a:1, b:10}});
+db.adminCommand({split : "test.foo", middle : { a:3, b:0}});
+
+db.adminCommand({moveChunk : "test.foo", find : {a:1, b:0}, to : "shard0000"});
+db.adminCommand({moveChunk : "test.foo", find : {a:1, b:15}, to : "shard0001"});
+db.adminCommand({moveChunk : "test.foo", find : {a:3, b:15}, to : "shard0002"});
+
+// Now make sure we get the same results from sharded and unsharded query.
+
+for (var i = 0; i < 20; i++) {
+ db.foo.save({a:1, b:i});
+ db.foo.save({a:2, b:i});
+ db.foo.save({a:3, b:i});
+ db.foo.save({a:4, b:i});
+}
+
+db.printShardingStatus();
+
+assert.eq(6, db.foo.find({a : {$in : [1, 2]}, b : {$in : [0, 3, 5]}}).itcount());
+assert.eq(14, db.foo.find({a : {$in : [1, 2]}, b : {$in : [0, 3, 5, 10, 11, 15, 19]}}).itcount());
+assert.eq(28, db.foo.find({a : {$in : [1, 2, 3, 4]}, b : {$in : [0, 3, 5, 10, 11, 15, 19]}}).itcount());
+assert.eq(14, db.foo.find({a : {$in : [3, 4]}, b : {$in : [0, 3, 5, 10, 11, 15, 19]}}).itcount());
+
+s.stop();
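The expected values in those final assertions are plain cross products: every (a, b) pair with a in 1..4 and b in 0..19 was saved exactly once, so an $in/$in query returns |aList| * |bList| documents, e.g. 2 * 3 = 6, 2 * 7 = 14, and 4 * 7 = 28. The same check, restated generically (it would run before s.stop()):

    // Each (a, b) combination exists exactly once, so the result size is
    // the product of the two $in list lengths.
    var aList = [1, 2], bList = [0, 3, 5, 10, 11, 15, 19];
    assert.eq(aList.length * bList.length,
              db.foo.find({a: {$in: aList}, b: {$in: bList}}).itcount());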
diff --git a/jstests/sharding/sharding_with_keyfile.js b/jstests/sharding/sharding_with_keyfile.js
index 94aea57..bd8d038 100644
--- a/jstests/sharding/sharding_with_keyfile.js
+++ b/jstests/sharding/sharding_with_keyfile.js
@@ -1,9 +1,15 @@
// Tests sharding with a key file
-var st = new ShardingTest({ name : jsTestName(),
+myTestName = "sharding_with_keyfile"
+
+keyFile = "jstests/sharding/" + myTestName + ".key";
+
+run( "chmod" , "600" , keyFile );
+
+var st = new ShardingTest({ name : myTestName ,
shards : 2,
mongos : 1,
- keyFile : keyFile = "jstests/sharding/" + jsTestName() + ".key" })
+ keyFile : keyFile })
// Make sure all our instances got the key
var configs = st._configDB.split(",")
diff --git a/jstests/sharding/sharding_with_keyfile.key b/jstests/sharding/sharding_with_keyfile.key
index fe3344b..fe3344b 100755..100644
--- a/jstests/sharding/sharding_with_keyfile.key
+++ b/jstests/sharding/sharding_with_keyfile.key
diff --git a/jstests/slowNightly/replReads.js b/jstests/slowNightly/replReads.js
index 4fe9130..dadc2c6 100644
--- a/jstests/slowNightly/replReads.js
+++ b/jstests/slowNightly/replReads.js
@@ -43,6 +43,8 @@ function testReadLoadBalancing(numReplicas) {
assert.soon( function(){ return secondaries[i].getDB("test").foo.count() > 0; } )
secondaries[i].getDB('test').setProfilingLevel(2)
}
+ // Primary may change with reconfig
+ primary.getDB('test').setProfilingLevel(2)
for (var i = 0; i < secondaries.length * 10; i++) {
conn = new Mongo(s._mongos[0].host)
@@ -75,13 +77,21 @@ function testReadLoadBalancing(numReplicas) {
var x = rsStats();
printjson(x);
var numOk = 0;
+ // Wait until the host disappears, since mongos now updates its view of
+ // the replica set via isMaster
+ if( x.hosts.length == rs.conf()["members"].length - 1 ) return true;
+ /*
for ( var i=0; i<x.hosts.length; i++ )
if ( x.hosts[i].hidden )
return true;
+ */
return false;
} , "one slave not ok" , 180000 , 5000
);
-
+
+ // Secondaries may change here
+ secondaries = s._rs[0].test.liveNodes.slaves
+
for (var i = 0; i < secondaries.length * 10; i++) {
conn = new Mongo(s._mongos[0].host)
conn.setSlaveOk()
@@ -95,7 +105,7 @@ function testReadLoadBalancing(numReplicas) {
}
counts = counts.sort();
- assert.eq( 20 , counts[1] - counts[0] , "counts wrong: " + tojson( counts ) );
+ assert.eq( 20 , Math.abs( counts[1] - counts[0] ), "counts wrong: " + tojson( counts ) );
s.stop()
}
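A footnote on the Math.abs change in the last hunk: counts = counts.sort() uses JavaScript's default comparator, which compares elements as strings, so numeric counts can come back out of numeric order and counts[1] - counts[0] can be negative. Math.abs tolerates that; passing a numeric comparator would remove the surprise at the source (a sketch, not the committed change):

    // Default Array.sort compares as strings: "10" < "9", so 10 sorts first.
    assert.eq("10,9", [9, 10].sort().toString());
    // A numeric comparator restores ascending order, making the
    // difference between adjacent counts non-negative:
    var sorted = [20, 9].sort(function(a, b) { return a - b; });  // [9, 20]
    assert.eq(11, sorted[1] - sorted[0]);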