author    | Antonin Kral <a.kral@bobek.cz> | 2011-09-14 17:08:06 +0200
committer | Antonin Kral <a.kral@bobek.cz> | 2011-09-14 17:08:06 +0200
commit    | 5d342a758c6095b4d30aba0750b54f13b8916f51 (patch)
tree      | 762e9aa84781f5e3b96db2c02d356c29cf0217c0 /jstests/slowNightly
parent    | cbe2d992e9cd1ea66af9fa91df006106775d3073 (diff)
download  | mongodb-5d342a758c6095b4d30aba0750b54f13b8916f51.tar.gz
Imported Upstream version 2.0.0
Diffstat (limited to 'jstests/slowNightly')
20 files changed, 999 insertions, 25 deletions
diff --git a/jstests/slowNightly/background.js b/jstests/slowNightly/background.js
new file mode 100644
index 0000000..d1d0047
--- /dev/null
+++ b/jstests/slowNightly/background.js
@@ -0,0 +1,51 @@
+// background indexing test during inserts.
+
+assert( db.getName() == "test" );
+
+t = db.bg1;
+t.drop();
+
+var a = new Mongo( db.getMongo().host ).getDB( db.getName() );
+
+for( var i = 0; i < 100000; i++ ) {
+    t.insert({y:'aaaaaaaaaaaa',i:i});
+    if( i % 10000 == 0 ) {
+        db.getLastError();
+        print(i);
+    }
+}
+
+//db.getLastError();
+
+// start bg indexing
+a.system.indexes.insert({ns:"test.bg1", key:{i:1}, name:"i_1", background:true});
+
+// add more data
+
+for( var i = 0; i < 100000; i++ ) {
+    t.insert({i:i});
+    if( i % 10000 == 0 ) {
+        printjson( db.currentOp() );
+        db.getLastError();
+        print(i);
+    }
+}
+
+printjson( db.getLastErrorObj() );
+
+printjson( db.currentOp() );
+
+for( var i = 0; i < 40; i++ ) {
+    if( db.currentOp().inprog.length == 0 )
+        break;
+    print("waiting");
+    sleep(1000);
+}
+
+printjson( a.getLastErrorObj() );
+
+var idx = t.getIndexes();
+// print("indexes:");
+// printjson(idx);
+
+assert( idx[1].key.i == 1 );
diff --git a/jstests/slowNightly/command_line_parsing.js b/jstests/slowNightly/command_line_parsing.js
index 38c7324..ba7b136 100644
--- a/jstests/slowNightly/command_line_parsing.js
+++ b/jstests/slowNightly/command_line_parsing.js
@@ -7,3 +7,15 @@ var baseName = "jstests_slowNightly_command_line_parsing";
 var m = startMongod( "--port", port, "--dbpath", "/data/db/" + baseName, "--notablescan" );
 m.getDB( baseName ).getCollection( baseName ).save( {a:1} );
 assert.throws( function() { m.getDB( baseName ).getCollection( baseName ).find( {a:1} ).toArray() } );
+
+// test config file
+var m2 = startMongod( "--port", port+2, "--dbpath", "/data/db/" + baseName +"2", "--config", "jstests/libs/testconfig");
+var m2result = {
+    "parsed" : {
+        "config" : "jstests/libs/testconfig",
+        "dbpath" : "/data/db/jstests_slowNightly_command_line_parsing2",
+        "fastsync" : "true",
+        "port" : 31002
+    }
+};
+assert( friendlyEqual(m2result.parsed, m2.getDB("admin").runCommand( "getCmdLineOpts" ).parsed) );
diff --git a/jstests/slowNightly/dur_big_atomic_update.js b/jstests/slowNightly/dur_big_atomic_update.js
index ffb0d83..800b4b8 100644
--- a/jstests/slowNightly/dur_big_atomic_update.js
+++ b/jstests/slowNightly/dur_big_atomic_update.js
@@ -23,6 +23,23 @@ err = d.getLastErrorObj();
 assert(err.err == null);
 assert(err.n == 1024);
+d.dropDatabase();
+
+for (var i=0; i<1024; i++){
+    d.foo.insert({_id:i});
+}
+
+// Do it again but in a db.eval
+d.eval(
+    function(host, big_string) {
+        new Mongo(host).getDB("test").foo.update({}, {$set: {big_string: big_string}}, false, /*multi*/true)
+    }, conn.host, big_string); // Can't pass in connection or DB objects
+
+err = d.getLastErrorObj();
+
+assert(err.err == null);
+assert(err.n == 1024);
+
 // free up space
 d.dropDatabase();
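A note on the pattern background.js above exercises: in the 2.0-era shell a background index build could be started with ensureIndex and the background option, or (as the test does) by inserting into system.indexes from a second connection so the build and foreground writes proceed concurrently, with getLastError acting as a write barrier. A minimal sketch, not part of the patch; the collection name bg_demo and the localhost:27017 address are illustrative:

// Open two connections so the index build and the writes use separate sockets.
var writer = new Mongo("localhost:27017").getDB("test");
var indexer = new Mongo("localhost:27017").getDB("test");

writer.bg_demo.drop();
for (var i = 0; i < 10000; i++) writer.bg_demo.insert({ i: i });
writer.getLastError(); // barrier: wait until the inserts have been applied

// Kick off the build without blocking foreground writes.
indexer.bg_demo.ensureIndex({ i: 1 }, { background: true });

// Keep writing while the build runs, then confirm the index exists.
for (var i = 0; i < 10000; i++) writer.bg_demo.insert({ i: i });
writer.getLastError();
printjson(writer.bg_demo.getIndexes());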
diff --git a/jstests/slowNightly/dur_remove_old_journals.js b/jstests/slowNightly/dur_remove_old_journals.js
index 3c57c12..1e81bee 100644
--- a/jstests/slowNightly/dur_remove_old_journals.js
+++ b/jstests/slowNightly/dur_remove_old_journals.js
@@ -33,20 +33,19 @@ sleep(sleepSecs*1000);
 files = listFiles(PATH + "/journal")
 printjson(files);
-
-var nfiles = 0;
-files.forEach(function (file) {
-    assert.eq('string', typeof (file.name)); // sanity checking
-    if (/prealloc/.test(file.name)) {
-        ;
-    }
-    else {
-        nfiles++;
-        assert(!(/j\._[01]/.test(file.name)), "Old journal file still exists: " + file.name);
-    }
-})
-
-assert.eq(2, nfiles); // j._2 and lsn
+
+var nfiles = 0;
+files.forEach(function (file) {
+    assert.eq('string', typeof (file.name)); // sanity checking
+    if (/prealloc/.test(file.name)) {
+        ;
+    }
+    else {
+        nfiles++;
+    }
+})
+
+assert.eq(2, nfiles); // latest journal file and lsn
 
 stopMongod(30001);
diff --git a/jstests/slowNightly/geo_axis_aligned.js b/jstests/slowNightly/geo_axis_aligned.js
new file mode 100644
index 0000000..0161ecc
--- /dev/null
+++ b/jstests/slowNightly/geo_axis_aligned.js
@@ -0,0 +1,108 @@
+// Axis aligned circles - hard-to-find precision errors possible with exact distances here
+
+t = db.axisaligned
+t.drop();
+
+scale = [ 1, 10, 1000, 10000 ]
+bits = [ 2, 3, 4, 5, 6, 7, 8, 9 ]
+radius = [ 0.0001, 0.001, 0.01, 0.1 ]
+center = [ [ 5, 52 ], [ 6, 53 ], [ 7, 54 ], [ 8, 55 ], [ 9, 56 ] ]
+
+bound = []
+for( var j = 0; j < center.length; j++ ) bound.push( [-180, 180] );
+
+// Scale all our values to test different sizes
+radii = []
+centers = []
+bounds = []
+
+for( var s = 0; s < scale.length; s++ ){
+    for ( var i = 0; i < radius.length; i++ ) {
+        radii.push( radius[i] * scale[s] )
+    }
+
+    for ( var j = 0; j < center.length; j++ ) {
+        centers.push( [ center[j][0] * scale[s], center[j][1] * scale[s] ] )
+        bounds.push( [ bound[j][0] * scale[s], bound[j][1] * scale[s] ] )
+    }
+
+}
+
+radius = radii
+center = centers
+bound = bounds
+
+for ( var b = 0; b < bits.length; b++ ) {
+
+    printjson( radius )
+    printjson( centers )
+
+    for ( var i = 0; i < radius.length; i++ ) {
+        for ( var j = 0; j < center.length; j++ ) {
+
+            printjson( { center : center[j], radius : radius[i], bits : bits[b] } );
+
+            t.drop()
+
+            // Make sure our numbers are precise enough for this test
+            if( (center[j][0] - radius[i] == center[j][0]) || (center[j][1] - radius[i] == center[j][1]) )
+                continue;
+
+            t.save( { "_id" : 1, "loc" : { "x" : center[j][0] - radius[i], "y" : center[j][1] } } );
+            t.save( { "_id" : 2, "loc" : { "x" : center[j][0], "y" : center[j][1] } } );
+            t.save( { "_id" : 3, "loc" : { "x" : center[j][0] + radius[i], "y" : center[j][1] } } );
+            t.save( { "_id" : 4, "loc" : { "x" : center[j][0], "y" : center[j][1] + radius[i] } } );
+            t.save( { "_id" : 5, "loc" : { "x" : center[j][0], "y" : center[j][1] - radius[i] } } );
+            t.save( { "_id" : 6, "loc" : { "x" : center[j][0] - radius[i], "y" : center[j][1] + radius[i] } } );
+            t.save( { "_id" : 7, "loc" : { "x" : center[j][0] + radius[i], "y" : center[j][1] + radius[i] } } );
+            t.save( { "_id" : 8, "loc" : { "x" : center[j][0] - radius[i], "y" : center[j][1] - radius[i] } } );
+            t.save( { "_id" : 9, "loc" : { "x" : center[j][0] + radius[i], "y" : center[j][1] - radius[i] } } );
+
+            t.ensureIndex( { loc : "2d" }, { max : bound[j][1], min : bound[j][0], bits : bits[b] } );
+
+            if( db.getLastError() ) continue;
+
+            print( "DOING WITHIN QUERY ")
+            r = t.find( { "loc" : { "$within" : { "$center" : [ center[j], radius[i] ] } } } );
+
+            //printjson( r.toArray() );
+
+            assert.eq( 5, r.count() );
+
+            // FIXME: surely code like this belongs in utils.js.
+            a = r.toArray();
+            x = [];
+            for ( k in a )
+                x.push( a[k]["_id"] )
+            x.sort()
+            assert.eq( [ 1, 2, 3, 4, 5 ], x );
+
+            print( " DOING NEAR QUERY ")
+            //printjson( center[j] )
+            r = t.find( { loc : { $near : center[j], $maxDistance : radius[i] } }, { _id : 1 } )
+            assert.eq( 5, r.count() );
+
+            print( " DOING DIST QUERY ")
+
+            a = db.runCommand({ geoNear : "axisaligned", near : center[j], maxDistance : radius[i] }).results
+            assert.eq( 5, a.length );
+
+            //printjson( a );
+
+            var distance = 0;
+            for( var k = 0; k < a.length; k++ ){
+                //print( a[k].dis )
+                //print( distance )
+                assert.gte( a[k].dis, distance );
+                //printjson( a[k].obj )
+                //print( distance = a[k].dis );
+            }
+
+            r = t.find( { loc : { $within : { $box : [ [ center[j][0] - radius[i], center[j][1] - radius[i] ], [ center[j][0] + radius[i], center[j][1] + radius[i] ] ] } } }, { _id : 1 } )
+            assert.eq( 9, r.count() );
+
+        }
+    }
+}
\ No newline at end of file
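geo_axis_aligned.js cross-checks the same point set through $center, $near, geoNear, and $box. A reduced sketch of that equivalence, not part of the patch; the collection name geodemo is illustrative and the radius is chosen to avoid exact-boundary floating point cases:

var c = db.geodemo; // hypothetical collection
c.drop();
c.save({ _id: 1, loc: { x: 4.9, y: 52 } });
c.save({ _id: 2, loc: { x: 5.0, y: 52 } });
c.save({ _id: 3, loc: { x: 5.1, y: 52 } });
c.ensureIndex({ loc: "2d" });

// All three accessors should agree on what lies within radius 0.2 of (5, 52).
assert.eq(3, c.find({ loc: { $within: { $center: [[5, 52], 0.2] } } }).count());
assert.eq(3, c.find({ loc: { $near: [5, 52], $maxDistance: 0.2 } }).count());
assert.eq(3, db.runCommand({ geoNear: "geodemo", near: [5, 52], maxDistance: 0.2 }).results.length);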
diff --git a/jstests/slowNightly/geo_mnypts.js b/jstests/slowNightly/geo_mnypts.js
new file mode 100644
index 0000000..ac40651
--- /dev/null
+++ b/jstests/slowNightly/geo_mnypts.js
@@ -0,0 +1,51 @@
+// Test sanity of geo queries with a lot of points
+
+var coll = db.testMnyPts
+coll.drop()
+
+var totalPts = 500 * 1000
+
+// Add points in a 100x100 grid
+for( var i = 0; i < totalPts; i++ ){
+    var ii = i % 10000
+    coll.insert({ loc : [ ii % 100, Math.floor( ii / 100 ) ] })
+}
+
+coll.ensureIndex({ loc : "2d" })
+
+// Check that a quarter of the points fall in each quadrant
+for( var i = 0; i < 4; i++ ){
+    var x = i % 2
+    var y = Math.floor( i / 2 )
+
+    var box = [[0, 0], [49, 49]]
+    box[0][0] += ( x == 1 ? 50 : 0 )
+    box[1][0] += ( x == 1 ? 50 : 0 )
+    box[0][1] += ( y == 1 ? 50 : 0 )
+    box[1][1] += ( y == 1 ? 50 : 0 )
+
+    assert.eq( totalPts / 4, coll.find({ loc : { $within : { $box : box } } }).count() )
+    assert.eq( totalPts / 4, coll.find({ loc : { $within : { $box : box } } }).itcount() )
+
+}
+
+// Check that half of the points fall in each half
+for( var i = 0; i < 2; i++ ){
+
+    var box = [[0, 0], [49, 99]]
+    box[0][0] += ( i == 1 ? 50 : 0 )
+    box[1][0] += ( i == 1 ? 50 : 0 )
+
+    assert.eq( totalPts / 2, coll.find({ loc : { $within : { $box : box } } }).count() )
+    assert.eq( totalPts / 2, coll.find({ loc : { $within : { $box : box } } }).itcount() )
+
+}
+
+// Check that all but the corner set of points fall within the radius
+var circle = [[0, 0], (100 - 1) * Math.sqrt( 2 ) - 0.25 ]
+
+assert.eq( totalPts - totalPts / ( 100 * 100 ), coll.find({ loc : { $within : { $center : circle } } }).count() )
+assert.eq( totalPts - totalPts / ( 100 * 100 ), coll.find({ loc : { $within : { $center : circle } } }).itcount() )
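The quadrant check above leans on $box corners being inclusive, which is why the upper corners are 49 and 99 rather than 50 and 100. A tiny standalone version of the same count/itcount cross-check, not part of the patch; the collection name boxdemo is illustrative:

// A 4x4 grid of points; [0,0]..[1,1] is exactly one 2x2 quadrant.
var g = db.boxdemo; // hypothetical collection
g.drop();
for (var x = 0; x < 4; x++)
    for (var y = 0; y < 4; y++)
        g.insert({ loc: [x, y] });
g.ensureIndex({ loc: "2d" });

var q = g.find({ loc: { $within: { $box: [[0, 0], [1, 1]] } } });
assert.eq(4, q.count());   // fast count taken from the index
assert.eq(4, q.itcount()); // actually iterate the cursor and recount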
diff --git a/jstests/slowNightly/geo_polygon.js b/jstests/slowNightly/geo_polygon.js
new file mode 100644
index 0000000..25bf026
--- /dev/null
+++ b/jstests/slowNightly/geo_polygon.js
@@ -0,0 +1,53 @@
+t = db.geo_polygon4;
+t.drop();
+
+shouldRun = true;
+
+bi = db.adminCommand( "buildinfo" ).sysInfo
+if ( bi.indexOf( "erh2" ) >= 0 ){
+    // this machine runs this test very slowly
+    // it seems to be related to osx 10.5
+    // if this machine gets upgraded, we should remove this check
+    // the os x debug builders still run this test, so i'm not worried about it
+    shouldRun = false;
+}
+
+if ( shouldRun ) {
+
+    num = 0;
+    for ( x = -180; x < 180; x += .5 ){
+        for ( y = -180; y < 180; y += .5 ){
+            o = { _id : num++ , loc : [ x , y ] };
+            t.save( o );
+        }
+    }
+
+    var numTests = 31;
+    for( var n = 0; n < numTests; n++ ){
+        t.dropIndexes()
+        t.ensureIndex( { loc : "2d" }, { bits : 2 + n } );
+
+        assert.between( 9 - 2 , t.find( { loc: { "$within": { "$polygon" : [[0,0], [1,1], [0,2]] }}} ).count() , 9, "Triangle Test", true);
+        assert.eq( num , t.find( { loc : { "$within" : { "$polygon" : [ [-180,-180], [-180,180], [180,180], [180,-180] ] } } } ).count() , "Bounding Box Test" );
+
+        assert.eq( 441 , t.find( { loc : { "$within" : { "$polygon" : [ [0,0], [0,10], [10,10], [10,0] ] } } } ).count() , "Square Test" );
+        assert.eq( 25 , t.find( { loc : { "$within" : { "$polygon" : [ [0,0], [0,2], [2,2], [2,0] ] } } } ).count() , "Square Test 2" );
+
+        if(1){ // SERVER-3726
+            // Points exactly on diagonals may be in or out, depending on how the error calculating the slope falls.
+            assert.between( 341 - 18 , t.find( { loc : { "$within" : { "$polygon" : [ [0,0], [0,10], [10,10], [10,0], [5,5] ] } } } ).count(), 341, "Square Missing Chunk Test", true );
+            assert.between( 21 - 2 , t.find( { loc : { "$within" : { "$polygon" : [ [0,0], [0,2], [2,2], [2,0], [1,1] ] } } } ).count(), 21 , "Square Missing Chunk Test 2", true );
+        }
+
+        assert.eq( 1 , t.find( { loc: { "$within": { "$polygon" : [[0,0], [0,0], [0,0]] }}} ).count() , "Point Test" );
+
+        // SERVER-3725
+        {
+            assert.eq( 5 , t.find( { loc: { "$within": { "$polygon" : [[0,0], [1,0], [2,0]] }}} ).count() , "Line Test 1" );
+            assert.eq( 3 , t.find( { loc: { "$within": { "$polygon" : [[0,0], [0,0], [1,0]] }}} ).count() , "Line Test 2" );
+            assert.eq( 5 , t.find( { loc: { "$within": { "$polygon" : [[0,2], [0,1], [0,0]] }}} ).count() , "Line Test 3" );
+        }
+
+        assert.eq( 3 , t.find( { loc: { "$within": { "$polygon" : [[0,1], [0,0], [0,0]] }}} ).count() , "Line Test 4" );
+    }
+}
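A minimal $polygon sketch, not part of the patch; the collection name polydemo is illustrative. As the SERVER-3726 note above warns, points exactly on a diagonal edge may or may not be matched, so this sketch prints rather than asserts an exact count:

var p = db.polydemo; // hypothetical collection
p.drop();
for (var x = 0; x < 3; x++)
    for (var y = 0; y < 3; y++)
        p.insert({ loc: [x, y] });
p.ensureIndex({ loc: "2d" });

// Triangle (0,0)-(2,0)-(0,2); boundary points generally count as inside,
// except possibly those on the diagonal edge.
var tri = [[0, 0], [2, 0], [0, 2]];
printjson(p.find({ loc: { $within: { $polygon: tri } } }).toArray());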
diff --git a/jstests/slowNightly/index_check10.js b/jstests/slowNightly/index_check10.js
new file mode 100644
index 0000000..be94be2
--- /dev/null
+++ b/jstests/slowNightly/index_check10.js
@@ -0,0 +1,133 @@
+// Randomized index testing with initial btree constructed using btree builder.
+// Also uses large strings.
+
+Random.setRandomSeed();
+
+t = db.test_index_check10;
+
+function doIt( indexVersion ) {
+
+    t.drop();
+
+    function sort() {
+        var sort = {};
+        for( var i = 0; i < n; ++i ) {
+            sort[ fields[ i ] ] = Random.rand() > 0.5 ? 1 : -1;
+        }
+        return sort;
+    }
+
+    var fields = [ 'a', 'b', 'c', 'd', 'e' ];
+    n = Random.randInt( 5 ) + 1;
+    var idx = sort();
+
+    var chars = "abcdefghijklmnopqrstuvwxyz";
+
+    function obj() {
+        var ret = {};
+        for( var i = 0; i < n; ++i ) {
+            ret[ fields[ i ] ] = r();
+        }
+        return ret;
+    }
+
+    function r() {
+        var len = Random.randInt( 1000 / n );
+        buf = "";
+        for( var i = 0; i < len; ++i ) {
+            buf += chars.charAt( Random.randInt( chars.length ) );
+        }
+        return buf;
+    }
+
+    function check() {
+        var v = t.validate();
+        if ( !v.valid ) {
+            printjson( v );
+            assert( v.valid );
+        }
+        var spec = {};
+        for( var i = 0; i < n; ++i ) {
+            if ( Random.rand() > 0.5 ) {
+                var bounds = [ r(), r() ];
+                if ( bounds[ 0 ] > bounds[ 1 ] ) {
+                    bounds.reverse();
+                }
+                var s = {};
+                if ( Random.rand() > 0.5 ) {
+                    s[ "$gte" ] = bounds[ 0 ];
+                } else {
+                    s[ "$gt" ] = bounds[ 0 ];
+                }
+                if ( Random.rand() > 0.5 ) {
+                    s[ "$lte" ] = bounds[ 1 ];
+                } else {
+                    s[ "$lt" ] = bounds[ 1 ];
+                }
+                spec[ fields[ i ] ] = s;
+            } else {
+                var vals = []
+                for( var j = 0; j < Random.randInt( 15 ); ++j ) {
+                    vals.push( r() );
+                }
+                spec[ fields[ i ] ] = { $in: vals };
+            }
+        }
+        s = sort();
+        c1 = t.find( spec, { _id:null } ).sort( s ).hint( idx ).toArray();
+        try {
+            c3 = t.find( spec, { _id:null } ).sort( s ).hint( {$natural:1} ).toArray();
+        } catch( e ) {
+            // may assert if too much data for in memory sort
+            print( "retrying check..." );
+            check(); // retry with different bounds
+            return;
+        }
+
+        var j = 0;
+        for( var i = 0; i < c3.length; ++i ) {
+            if( friendlyEqual( c1[ j ], c3[ i ] ) ) {
+                ++j;
+            } else {
+                var o = c3[ i ];
+                var size = Object.bsonsize( o );
+                for( var f in o ) {
+                    size -= f.length;
+                }
+
+                var max = indexVersion == 0 ? 819 : 818;
+
+                if ( size <= max /* KeyMax */ ) {
+                    assert.eq( c1, c3 , "size: " + size );
+                }
+            }
+        }
+    }
+
+    for( var i = 0; i < 10000; ++i ) {
+        t.save( obj() );
+    }
+
+    t.ensureIndex( idx , { v : indexVersion } );
+    check();
+
+    for( var i = 0; i < 10000; ++i ) {
+        if ( Random.rand() > 0.9 ) {
+            t.save( obj() );
+        } else {
+            t.remove( obj() ); // improve
+        }
+        if( Random.rand() > 0.999 ) {
+            print( i );
+            check();
+        }
+    }
+
+    check();
+
+}
+
+for( var z = 0; z < 5; ++z ) {
+    var indexVersion = z % 2;
+    doIt( indexVersion );
+}
diff --git a/jstests/slowNightly/index_check9.js b/jstests/slowNightly/index_check9.js
index 6634d06..33ce0a6 100644
--- a/jstests/slowNightly/index_check9.js
+++ b/jstests/slowNightly/index_check9.js
@@ -1,3 +1,5 @@
+// Randomized index testing
+
 Random.setRandomSeed();
 
 t = db.test_index_check9;
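The core cross-check index_check10.js relies on is that the same query answered through an index and through a full collection scan must agree. A small standalone version of that idiom, not part of the patch; the collection name check_demo is illustrative:

var col = db.check_demo; // hypothetical collection
col.drop();
for (var i = 0; i < 1000; i++)
    col.save({ a: Math.floor(Math.random() * 100) });
col.ensureIndex({ a: 1 });

// Same predicate, forced down two different plans via hint().
var spec = { a: { $gte: 25, $lt: 75 } };
var viaIndex = col.find(spec).sort({ a: 1 }).hint({ a: 1 }).toArray();
var viaScan  = col.find(spec).sort({ a: 1 }).hint({ $natural: 1 }).toArray();
assert.eq(viaIndex.length, viaScan.length, "index scan and collection scan disagree");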
diff --git a/jstests/slowNightly/replReads.js b/jstests/slowNightly/replReads.js
new file mode 100644
index 0000000..4fe9130
--- /dev/null
+++ b/jstests/slowNightly/replReads.js
@@ -0,0 +1,108 @@
+// Test that doing slaveOk reads from secondaries hits all the secondaries evenly
+
+function testReadLoadBalancing(numReplicas) {
+
+    s = new ShardingTest( "replReads" , 1 /* numShards */, 0 /* verboseLevel */, 1 /* numMongos */, { rs : true , numReplicas : numReplicas, chunksize : 1 } )
+
+    s.adminCommand({enablesharding : "test"})
+    s.config.settings.find().forEach(printjson)
+
+    s.adminCommand({shardcollection : "test.foo", key : {_id : 1}})
+
+    s.getDB("test").foo.insert({a : 123})
+
+    primary = s._rs[0].test.liveNodes.master
+    secondaries = s._rs[0].test.liveNodes.slaves
+
+    function rsStats() {
+        return s.getDB( "admin" ).runCommand( "connPoolStats" )["replicaSets"]["replReads-rs0"];
+    }
+
+    assert.eq( numReplicas , rsStats().hosts.length );
+
+    function isMasterOrSecondary( info ){
+        if ( ! info.ok )
+            return false;
+        if ( info.ismaster )
+            return true;
+        return info.secondary && ! info.hidden;
+    }
+
+    assert.soon(
+        function() {
+            var x = rsStats().hosts;
+            printjson(x)
+            for ( var i=0; i<x.length; i++ )
+                if ( ! isMasterOrSecondary( x[i] ) )
+                    return false;
+            return true;
+        }
+    );
+
+    for (var i = 0; i < secondaries.length; i++) {
+        assert.soon( function(){ return secondaries[i].getDB("test").foo.count() > 0; } )
+        secondaries[i].getDB('test').setProfilingLevel(2)
+    }
+
+    for (var i = 0; i < secondaries.length * 10; i++) {
+        conn = new Mongo(s._mongos[0].host)
+        conn.setSlaveOk()
+        conn.getDB('test').foo.findOne()
+    }
+
+    for (var i = 0; i < secondaries.length; i++) {
+        var profileCollection = secondaries[i].getDB('test').system.profile;
+        assert.eq(10, profileCollection.find().count(), "Wrong number of read queries sent to secondary " + i + " " + tojson( profileCollection.find().toArray() ))
+    }
+
+    db = primary.getDB( "test" );
+
+    printjson(rs.status());
+    c = rs.conf();
+    print( "config before: " + tojson(c) );
+    for ( i=0; i<c.members.length; i++ ) {
+        if ( c.members[i].host == db.runCommand( "ismaster" ).primary )
+            continue;
+        c.members[i].hidden = true;
+        c.members[i].priority = 0;
+        break;
+    }
+    rs.reconfig( c );
+    print( "config after: " + tojson( rs.conf() ) );
+
+    assert.soon(
+        function() {
+            var x = rsStats();
+            printjson(x);
+            var numOk = 0;
+            for ( var i=0; i<x.hosts.length; i++ )
+                if ( x.hosts[i].hidden )
+                    return true;
+            return false;
+        } , "one slave not ok" , 180000 , 5000
+    );
+
+    for (var i = 0; i < secondaries.length * 10; i++) {
+        conn = new Mongo(s._mongos[0].host)
+        conn.setSlaveOk()
+        conn.getDB('test').foo.findOne()
+    }
+
+    var counts = []
+    for (var i = 0; i < secondaries.length; i++) {
+        var profileCollection = secondaries[i].getDB('test').system.profile;
+        counts.push( profileCollection.find().count() );
+    }
+
+    counts = counts.sort();
+    assert.eq( 20 , counts[1] - counts[0] , "counts wrong: " + tojson( counts ) );
+
+    s.stop()
+}
+
+//for (var i = 1; i < 10; i++) {
+//    testReadLoadBalancing(i)
+//}
+
+// Is there a way that this can be run multiple times with different values?
testReadLoadBalancing(3)
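The measuring idiom replReads.js uses is worth calling out: turn on profiling level 2 on a secondary, issue slaveOk reads through mongos, then count the profiled queries to see where reads actually landed. A standalone sketch, not part of the patch; the host:port values are illustrative:

// Profile everything on one secondary of the set.
var sec = new Mongo("localhost:31001").getDB("test");
sec.setProfilingLevel(2);

// Read through a mongos with slaveOk, so secondaries are eligible targets.
var router = new Mongo("localhost:30999"); // a mongos
router.setSlaveOk();
for (var i = 0; i < 10; i++)
    router.getDB("test").foo.findOne();

print("reads profiled on this secondary: " +
      sec.system.profile.find({ op: "query" }).count());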
+ +load("jstests/replsets/rslib.js"); + +var rs = new ReplSetTest( {name: 'testSet', nodes: 3} ); +var nodes = rs.startSet(); +rs.initiate(); + +var master = rs.getMaster(); + +var everyoneOkSoon = function() { + var status; + assert.soon(function() { + var ok = true; + status = master.adminCommand({replSetGetStatus : 1}); + + if (!status.members) { + return false; + } + + for (var i in status.members) { + if (status.members[i].health == 0) { + continue; + } + ok &= status.members[i].state == 1 || status.members[i].state == 2; + } + return ok; + }, tojson(status)); +}; + +var checkPrimaryIs = function(node) { + var status; + + assert.soon(function() { + var ok = true; + + try { + status = master.adminCommand({replSetGetStatus : 1}); + } + catch(e) { + print(e); + reconnect(master); + status = master.adminCommand({replSetGetStatus : 1}); + } + + var str = "goal: "+node.host+"==1 states: "; + if (!status || !status.members) { + return false; + } + status.members.forEach( function(m) { + str += m.name + ": "+m.state +" "; + + if (m.name == node.host) { + ok &= m.state == 1; + } + else { + ok &= m.state != 1 || (m.state == 1 && m.health == 0); + } + }); + print(str); + + occasionally(function() { + printjson(status); + }, 15); + + return ok; + }, node.host+'==1', 60000, 1000); + + everyoneOkSoon(); +}; + +everyoneOkSoon(); + +// intial sync +master.getDB("foo").bar.insert({x:1}); +rs.awaitReplication(); + +print("starting loop"); + +var n = 5; +for (i=0; i<n; i++) { + print("Round "+i+": FIGHT!"); + + var max = null; + var second = null; + reconnect(master); + var config = master.getDB("local").system.replset.findOne(); + + var version = config.version; + config.version++; + + for (var j=0; j<config.members.length; j++) { + var priority = Math.random()*100; + config.members[j].priority = priority; + + if (!max || priority > max.priority) { + max = config.members[j]; + } + } + + for (var j=0; j<config.members.length; j++) { + if (config.members[j] == max) { + continue; + } + if (!second || config.members[j].priority > second.priority) { + second = config.members[j]; + } + } + + print("max is "+max.host+" with priority "+max.priority+", reconfiguring..."); + + var count = 0; + while (config.version != version && count < 100) { + reconnect(master); + + occasionally(function() { + print("version is "+version+", trying to update to "+config.version); + }); + + try { + master.adminCommand({replSetReconfig : config}); + master = rs.getMaster(); + reconnect(master); + + version = master.getDB("local").system.replset.findOne().version; + } + catch (e) { + print("Caught exception: "+e); + } + + count++; + } + + assert.soon(function() { + rs.getMaster(); + return rs.liveNodes.slaves.length == 2; + }, "2 slaves"); + + assert.soon(function() { + versions = [0,0]; + rs.liveNodes.slaves[0].setSlaveOk(); + versions[0] = rs.liveNodes.slaves[0].getDB("local").system.replset.findOne().version; + rs.liveNodes.slaves[1].setSlaveOk(); + versions[1] = rs.liveNodes.slaves[1].getDB("local").system.replset.findOne().version; + return versions[0] == config.version && versions[1] == config.version; + }); + + // the reconfiguration needs to be replicated! the hb sends it out + // separately from the repl + rs.awaitReplication(); + + print("reconfigured. Checking statuses."); + + checkPrimaryIs(max); + + rs.stop(max._id); + + var master = rs.getMaster(); + + print("killed max primary. 
Checking statuses."); + + print("second is "+second.host+" with priority "+second.priority); + checkPrimaryIs(second); + + rs.restart(max._id); + master = rs.getMaster(); + + print("Restarted max. Checking statuses."); + checkPrimaryIs(max); +} + +print("priority1.js SUCCESS!"); diff --git a/jstests/slowNightly/sharding_balance1.js b/jstests/slowNightly/sharding_balance1.js index 9379c4f..c50148c 100644 --- a/jstests/slowNightly/sharding_balance1.js +++ b/jstests/slowNightly/sharding_balance1.js @@ -41,7 +41,8 @@ print( diff() ) assert.soon( function(){ var d = diff(); return d < 5; -} , "balance didn't happen" , 1000 * 60 * 3 , 5000 ); +// Make sure there's enough time here, since balancing can sleep for 15s or so between balances. +} , "balance didn't happen" , 1000 * 60 * 5 , 5000 ); var chunkCount = sum(); s.adminCommand( { removeshard: "shard0000" } ); diff --git a/jstests/slowNightly/sharding_balance4.js b/jstests/slowNightly/sharding_balance4.js index c7f76dd..5288bda 100644 --- a/jstests/slowNightly/sharding_balance4.js +++ b/jstests/slowNightly/sharding_balance4.js @@ -90,8 +90,12 @@ function diff(){ if ( le.err ) print( "ELIOT ELIOT : " + tojson( le ) + "\t" + myid ); - assert( le.updatedExisting , "GLE diff 1 myid: " + myid + " " + tojson(le) ) - assert.eq( 1 , le.n , "GLE diff 2 myid: " + myid + " " + tojson(le) ) + if ( ! le.updatedExisting || le.n != 1 ) { + print( "going to assert for id: " + myid + " correct count is: " + counts[myid] + " db says count is: " + db.foo.findOne( { _id : myid } ) ); + } + + assert( le.updatedExisting , "GLE diff myid: " + myid + " 1: " + tojson(le) ) + assert.eq( 1 , le.n , "GLE diff myid: " + myid + " 2: " + tojson(le) ) if ( Math.random() > .99 ){ diff --git a/jstests/slowNightly/sharding_migrateBigObject.js b/jstests/slowNightly/sharding_migrateBigObject.js new file mode 100644 index 0000000..5ad9ed1 --- /dev/null +++ b/jstests/slowNightly/sharding_migrateBigObject.js @@ -0,0 +1,61 @@ + +var shardA = startMongodEmpty("--shardsvr", "--port", 30001, "--dbpath", "/data/migrateBigger0"); +var shardB = startMongodEmpty("--shardsvr", "--port", 30002, "--dbpath", "/data/migrateBigger1"); +var config = startMongodEmpty("--configsvr", "--port", 29999, "--dbpath", "/data/migrateBiggerC"); + +var mongos = startMongos("--port", 30000, "--configdb", "localhost:29999") + +var admin = mongos.getDB("admin") + +admin.runCommand({ addshard : "localhost:30001" }) +admin.runCommand({ addshard : "localhost:30002" }) + +db = mongos.getDB("test"); +var coll = db.getCollection("stuff") + +var data = "x" +var nsq = 16 +var n = 255 + +for( var i = 0; i < nsq; i++ ) data += data + +dataObj = {} +for( var i = 0; i < n; i++ ) dataObj["data-" + i] = data + +for( var i = 0; i < 40; i++ ) { + if(i != 0 && i % 10 == 0) printjson( coll.stats() ) + coll.save({ data : dataObj }) +} +db.getLastError(); + +assert.eq( 40 , coll.count() , "prep1" ); + +printjson( coll.stats() ) + +admin.runCommand({ enablesharding : "" + coll.getDB() }) + +admin.printShardingStatus() + +admin.runCommand({ shardcollection : "" + coll, key : { _id : 1 } }) + +assert.lt( 5 , mongos.getDB( "config" ).chunks.find( { ns : "test.stuff" } ).count() , "not enough chunks" ); + +assert.soon( + function(){ + res = mongos.getDB( "config" ).chunks.group( { cond : { ns : "test.stuff" } , + key : { shard : 1 } , + reduce : function( doc , out ){ out.nChunks++; } , + initial : { nChunks : 0 } } ); + + printjson( res ); + return res.length > 1 && Math.abs( res[0].nChunks - res[1].nChunks ) <= 3; + + } , + "never 
migrated" , 180000 , 1000 ); + +stopMongod( 30000 ); +stopMongod( 29999 ); +stopMongod( 30001 ); +stopMongod( 30002 ); + + diff --git a/jstests/slowNightly/sharding_multiple_ns_rs.js b/jstests/slowNightly/sharding_multiple_ns_rs.js new file mode 100644 index 0000000..3cd7b3e --- /dev/null +++ b/jstests/slowNightly/sharding_multiple_ns_rs.js @@ -0,0 +1,49 @@ + +s = new ShardingTest( "blah" , 1 /* numShards */, 1 /* verboseLevel */, 1 /* numMongos */, { rs : true , chunksize : 1 } ) + +s.adminCommand( { enablesharding : "test" } ); +s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } ); + +db = s.getDB( "test" ); + +for ( i=0; i<100; i++ ) { + db.foo.insert( { _id : i , x : i } ) + db.bar.insert( { _id : i , x : i } ) +} + +db.getLastError(); + +sh.splitAt( "test.foo" , { _id : 50 } ) + +other = new Mongo( s.s.name ); +dbother = other.getDB( "test" ); + +assert.eq( 5 , db.foo.findOne( { _id : 5 } ).x ); +assert.eq( 5 , dbother.foo.findOne( { _id : 5 } ).x ); + +assert.eq( 5 , db.bar.findOne( { _id : 5 } ).x ); +assert.eq( 5 , dbother.bar.findOne( { _id : 5 } ).x ); + + +s._rs[0].test.awaitReplication(); + +s._rs[0].test.stopMaster( 15 , true ) + +sleep( 20 * 1000 ); + +assert.eq( 5 , db.foo.findOne( { _id : 5 } ).x ); +assert.eq( 5 , db.bar.findOne( { _id : 5 } ).x ); + +s.adminCommand( { shardcollection : "test.bar" , key : { _id : 1 } } ); +sh.splitAt( "test.bar" , { _id : 50 } ) + +yetagain = new Mongo( s.s.name ) +assert.eq( 5 , yetagain.getDB( "test" ).bar.findOne( { _id : 5 } ).x ) +assert.eq( 5 , yetagain.getDB( "test" ).foo.findOne( { _id : 5 } ).x ) + +assert.eq( 5 , dbother.bar.findOne( { _id : 5 } ).x ); +assert.eq( 5 , dbother.foo.findOne( { _id : 5 } ).x ); + + +s.stop(); + diff --git a/jstests/slowNightly/sharding_passthrough.js b/jstests/slowNightly/sharding_passthrough.js index 81781ca..d81df68 100644 --- a/jstests/slowNightly/sharding_passthrough.js +++ b/jstests/slowNightly/sharding_passthrough.js @@ -1,6 +1,6 @@ -s = new ShardingTest( "sharding_passthrough" , 2 , 1 , 1 ); -s.adminCommand( { enablesharding : "test" } ); -db=s.getDB("test"); +myShardingTest = new ShardingTest( "sharding_passthrough" , 2 , 1 , 1 ); +myShardingTest.adminCommand( { enablesharding : "test" } ); +db=myShardingTest.getDB("test"); var files = listFiles("jstests"); @@ -9,7 +9,6 @@ var runnerStart = new Date() files.forEach( function(x) { -// /(basic|update).*\.js$/ if ( /[\/\\]_/.test(x.name) || ! /\.js$/.test(x.name ) ){ print(" >>>>>>>>>>>>>>> skipping " + x.name); @@ -63,17 +62,17 @@ files.forEach( * clean (apitest_dbcollection) * logout and getnonce */ - if (/[\/\\](error3|capped.*|splitvector|apitest_db|cursor6|copydb-auth|profile1|dbhash|median|apitest_dbcollection|evalb|evald|eval_nolock|auth1|auth2|unix_socket\d*)\.js$/.test(x.name)) { + if (/[\/\\](error3|capped.*|splitvector|apitest_db|cursor6|copydb-auth|profile\d*|dbhash|median|apitest_dbcollection|evalb|evald|eval_nolock|auth1|auth2|dropdb_race|unix_socket\d*)\.js$/.test(x.name)) { print(" !!!!!!!!!!!!!!! skipping test that has failed under sharding but might not anymore " + x.name) return; } // These are bugs (some might be fixed now): - if (/[\/\\](apply_ops1|count5|cursor8|or4|shellkillop|update4)\.js$/.test(x.name)) { + if (/[\/\\](apply_ops1|count5|cursor8|or4|shellkillop|update4|profile\d*)\.js$/.test(x.name)) { print(" !!!!!!!!!!!!!!! 
diff --git a/jstests/slowNightly/sharding_rs1.js b/jstests/slowNightly/sharding_rs1.js
index 4ad126e..f73e690 100644
--- a/jstests/slowNightly/sharding_rs1.js
+++ b/jstests/slowNightly/sharding_rs1.js
@@ -1,6 +1,6 @@
 // tests sharding with replica sets
 
-s = new ShardingTest( "rs1" , 3 , 1 , 2 , { rs : true , chunksize : 1 } )
+s = new ShardingTest( "rs1" , 3 /* numShards */, 1 /* verboseLevel */, 2 /* numMongos */, { rs : true , chunksize : 1 } )
 
 s.adminCommand( { enablesharding : "test" } );
 
@@ -59,6 +59,12 @@ assert.soon( function(){
 
 s.config.settings.update( { _id: "balancer" }, { $set : { stopped: true } } , true );
 
+sleep( 1000 );
+
+while ( sh.isBalancerRunning() ){
+    sleep( 1000 );
+}
+
 for ( i=0; i<s._rs.length; i++ ){
     r = s._rs[i];
     r.test.awaitReplication();
diff --git a/jstests/slowNightly/sharding_rs2.js b/jstests/slowNightly/sharding_rs2.js
index cd7cf68..4de935b 100644
--- a/jstests/slowNightly/sharding_rs2.js
+++ b/jstests/slowNightly/sharding_rs2.js
@@ -155,7 +155,29 @@ assert.eq( before.query + 10 , after.query , "E3" )
 assert.eq( 100 , ts.count() , "E4" )
 assert.eq( 100 , ts.find().itcount() , "E5" )
 printjson( ts.find().batchSize(5).explain() )
+
+before = rs.test.getMaster().adminCommand( "serverStatus" ).opcounters
+// Careful, mongos can poll the masters here too unrelated to the query,
+// resulting in this test failing sporadically if/when there's a delay here.
 assert.eq( 100 , ts.find().batchSize(5).itcount() , "E6" )
+after = rs.test.getMaster().adminCommand( "serverStatus" ).opcounters
+assert.eq( before.query + before.getmore , after.query + after.getmore , "E6.1" )
+
+assert.eq( 100 , ts.find().batchSize(5).itcount() , "F1" )
+
+for ( i=0; i<10; i++ ) {
+    m = new Mongo( s.s.name );
+    m.setSlaveOk();
+    ts = m.getDB( "test" ).foo
+    assert.eq( 100 , ts.find().batchSize(5).itcount() , "F2." + i )
+}
+
+for ( i=0; i<10; i++ ) {
+    m = new Mongo( s.s.name );
+    ts = m.getDB( "test" ).foo
+    assert.eq( 100 , ts.find().batchSize(5).itcount() , "F3." + i )
+}
+
 printjson( db.adminCommand( "getShardMap" ) );
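The sharding_rs1.js change above adds a stop-and-drain step: flipping the balancer flag off does not interrupt a round already in flight, so the test waits until sh.isBalancerRunning() reports idle. The same idiom standalone, not part of the patch, run against a mongos:

// Disable the balancer, then wait for any in-flight balancing round to finish.
db.getSisterDB("config").settings.update({ _id: "balancer" }, { $set: { stopped: true } }, true);
while (sh.isBalancerRunning()) {
    sleep(1000);
}
print("balancer drained; chunk placement is now stable");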
diff --git a/jstests/slowNightly/sharding_rs_arb1.js b/jstests/slowNightly/sharding_rs_arb1.js
new file mode 100644
index 0000000..be4c4dc
--- /dev/null
+++ b/jstests/slowNightly/sharding_rs_arb1.js
@@ -0,0 +1,40 @@
+x = 5
+name = "sharding_rs_arb1"
+replTest = new ReplSetTest( { name : name , nodes : 3 , startPort : 31000 } );
+nodes = replTest.startSet();
+var port = replTest.ports;
+replTest.initiate({_id : name, members :
+        [
+            {_id:0, host : getHostName()+":"+port[0]},
+            {_id:1, host : getHostName()+":"+port[1]},
+            {_id:2, host : getHostName()+":"+port[2], arbiterOnly : true},
+        ],
+    });
+
+replTest.awaitReplication();
+
+master = replTest.getMaster();
+db = master.getDB( "test" );
+printjson( rs.status() );
+
+var config = startMongodEmpty("--configsvr", "--port", 29999, "--dbpath", "/data/db/" + name + "_config" );
+
+var mongos = startMongos("--port", 30000, "--configdb", getHostName() + ":29999")
+var admin = mongos.getDB("admin")
+var url = name + "/";
+for ( i=0; i<port.length; i++ ) {
+    if ( i > 0 )
+        url += ",";
+    url += getHostName() + ":" + port[i];
+}
+print( url )
+res = admin.runCommand( { addshard : url } )
+printjson( res )
+assert( res.ok , tojson(res) )
+
+
+
+stopMongod( 30000 )
+stopMongod( 29999 )
+replTest.stopSet();
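sharding_rs_arb1.js builds the shard address in the "setName/host1:port1,host2:port2,..." seed-list form before calling addshard; mongos then discovers the remaining members (including the arbiter) from the set itself. A sketch of just that step, not part of the patch; mongosConn and the hosts are hypothetical:

// Add a replica set (with an arbiter among its members) as a shard.
var url = "sharding_rs_arb1/" + getHostName() + ":31000," + getHostName() + ":31001";
var res = mongosConn.getDB("admin").runCommand({ addshard: url }); // mongosConn: a connection to a mongos
assert(res.ok, tojson(res));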
diff --git a/jstests/slowNightly/sync6_slow.js b/jstests/slowNightly/sync6_slow.js
new file mode 100644
index 0000000..63d6123
--- /dev/null
+++ b/jstests/slowNightly/sync6_slow.js
@@ -0,0 +1,82 @@
+// More complete version of sharding/sync6.js
+// Test that distributed lock forcing does not result in inconsistencies, using a
+// fast timeout.
+
+// Note that this test will always have random factors, since we can't control the
+// thread scheduling.
+
+test = new SyncCCTest( "sync6", { logpath : "/dev/null" } )
+
+// Start up another process to handle our commands to the cluster, mostly so it's
+// easier to read.
+var commandConn = startMongodTest( 30000 + 4, "syncCommander", false, {})//{ logpath : "/dev/null" } )//{verbose : ""} )
+// { logpath : "/data/db/syncCommander/mongod.log" } );
+
+// Up the log level for this test
+commandConn.getDB( "admin" ).runCommand( { setParameter : 1, logLevel : 0 } )
+
+// Have lots of threads, so use larger i
+// Can't test too many, we get socket exceptions... possibly due to the
+// javascript console.
+// TODO: Figure out our max bounds here - use fewer threads now to avoid pinger starvation issues.
+for ( var t = 0; t < 4; t++ ) {
+for ( var i = 4; i < 5; i++ ) {
+
+    // Our force time is 6 seconds - slightly different from sync6 to ensure the exact time is not important
+    var takeoverMS = 6000;
+
+    // Generate valid sleep and skew for this timeout
+    var threadSleepWithLock = takeoverMS / 2;
+    var configServerTimeSkew = [ 0, 0, 0 ]
+    for ( var h = 0; h < 3; h++ ) {
+        // Skew by 1/30th the takeover time either way, at max
+        configServerTimeSkew[h] = ( i + h ) % Math.floor( takeoverMS / 60 )
+        // Make skew pos or neg
+        configServerTimeSkew[h] *= ( ( i + h ) % 2 ) ? -1 : 1;
+    }
+
+    // Build command
+    command = { _testDistLockWithSkew : 1 }
+
+    // Basic test parameters
+    command["lockName"] = "TimeSkewFailNewTest_lock_" + i;
+    command["host"] = test.url
+    command["seed"] = i
+    command["numThreads"] = ( i % 50 ) + 1
+
+    // Critical values so we're sure of correct operation
+    command["takeoverMS"] = takeoverMS
+    command["wait"] = 4 * takeoverMS // so we must force the lock
+    command["skewHosts"] = configServerTimeSkew
+    command["threadWait"] = threadSleepWithLock
+
+    // Less critical test params
+
+    // 1/3 of threads will not release the lock
+    command["hangThreads"] = 3
+    // Amount of time to wait before trying the lock again
+    command["threadSleep"] = 1;// ( ( i + 1 ) * 100 ) % (takeoverMS / 4)
+    // Amount of total clock skew possible between locking threads (processes)
+    // This can be large now.
+    command["skewRange"] = ( command["takeoverMS"] * 3 ) * 60 * 1000
+
+    // Double-check our sleep, host skew, and takeoverMS values again
+
+    // At maximum, our threads must sleep only half the lock timeout time.
+    assert( command["threadWait"] <= command["takeoverMS"] / 2 )
+    for ( var h = 0; h < command["skewHosts"].length; h++ ) {
+        // At maximum, our config server time skew needs to be less than 1/30th
+        // the total time skew (1/60th either way).
+        assert( Math.abs( command["skewHosts"][h] ) <= ( command["takeoverMS"] / 60 ) )
+    }
+
+    result = commandConn.getDB( "admin" ).runCommand( command )
+    printjson( result )
+    printjson( command )
+    assert( result.ok, "Skewed threads did not increment correctly." );
+
+}
+}
+
+stopMongoProgram( 30004 )
+test.stop();