author     Antonin Kral <a.kral@bobek.cz>  2010-09-24 19:01:03 +0200
committer  Antonin Kral <a.kral@bobek.cz>  2010-09-24 19:01:03 +0200
commit     0ad0c09511a04ebe837f2acb859d47f2aa4e038a (patch)
tree       109babcb556f6c5884b77853120717f0617c7a1e /jstests
parent     03e58f81cad8dd4cfcd1530f327116f0cff6ceb3 (diff)
download   mongodb-0ad0c09511a04ebe837f2acb859d47f2aa4e038a.tar.gz
Imported Upstream version 1.6.3
Diffstat (limited to 'jstests')
-rw-r--r--  jstests/datasize3.js                       32
-rw-r--r--  jstests/disk/diskfull.js                    5
-rw-r--r--  jstests/evalc.js                           32
-rw-r--r--  jstests/geo_circle1.js                      2
-rw-r--r--  jstests/geo_queryoptimizer.js              27
-rw-r--r--  jstests/mr_sort.js                         44
-rw-r--r--  jstests/numberlong.js                      28
-rw-r--r--  jstests/remove3.js                          2
-rw-r--r--  jstests/remove_justone.js                  16
-rw-r--r--  jstests/repl/repl10.js                     12
-rw-r--r--  jstests/repl/repl12.js                     47
-rw-r--r--  jstests/repl/replacePeer2.js               11
-rw-r--r--  jstests/repl/snapshot2.js                  47
-rw-r--r--  jstests/replsets/randomcommands1.js        29
-rw-r--r--  jstests/replsets/replset1.js                3
-rw-r--r--  jstests/replsets/replset2.js              242
-rw-r--r--  jstests/replsets/replset4.js               71
-rw-r--r--  jstests/replsets/replset5.js               72
-rw-r--r--  jstests/replsets/replset_remove_node.js    17
-rw-r--r--  jstests/replsets/rollback.js               30
-rw-r--r--  jstests/replsets/rollback2.js               8
-rwxr-xr-x  jstests/replsets/rollback3.js             224
-rw-r--r--  jstests/replsets/sync1.js                 353
-rwxr-xr-x  jstests/replsets/two_initsync.js           93
-rw-r--r--  jstests/sharding/features3.js               2
-rw-r--r--  jstests/sharding/shard3.js                 20
26 files changed, 1101 insertions, 368 deletions
diff --git a/jstests/datasize3.js b/jstests/datasize3.js
new file mode 100644
index 0000000..d45f34b
--- /dev/null
+++ b/jstests/datasize3.js
@@ -0,0 +1,32 @@
+
+t = db.datasize3;
+t.drop()
+
+function run( options ){
+ var c = { dataSize : "test.datasize3" };
+ if ( options )
+ Object.extend( c , options );
+ return db.runCommand( c );
+}
+
+t.insert( { x : 1 } )
+
+a = run()
+b = run( { estimate : true } )
+
+assert.eq( a.size , b.size );
+
+
+t.ensureIndex( { x : 1 } )
+
+for ( i=2; i<100; i++ )
+ t.insert( { x : i } )
+
+a = run( { min : { x : 20 } , max : { x : 50 } } )
+b = run( { min : { x : 20 } , max : { x : 50 } , estimate : true } )
+
+assert.eq( a.size , b.size );
+
+
+
+
diff --git a/jstests/disk/diskfull.js b/jstests/disk/diskfull.js
index 8057174..6cbcbb7 100644
--- a/jstests/disk/diskfull.js
+++ b/jstests/disk/diskfull.js
@@ -14,9 +14,10 @@ if ( !doIt ) {
if ( doIt ) {
port = allocatePorts( 1 )[ 0 ];
m = startMongoProgram( "mongod", "--port", port, "--dbpath", "/data/db/diskfulltest", "--nohttpinterface", "--bind_ip", "127.0.0.1" );
- m.getDB( "diskfulltest" ).getCollection( "diskfulltest" ).save( { a: 6 } );
+ c = m.getDB( "diskfulltest" ).getCollection( "diskfulltest" )
+ c.save( { a: 6 } );
assert.soon( function() { return rawMongoProgramOutput().match( /file allocation failure/ ); }, "didn't see 'file allocation failure'" );
- assert.soon( function() { return rawMongoProgramOutput().match( /Caught Assertion in insert , continuing/ ); }, "didn't see 'Caught Assertion...'" );
+ assert.isnull( c.findOne() , "shouldn't exist" );
sleep( 3000 );
m2 = new Mongo( m.host );
printjson( m2.getDBs() );
diff --git a/jstests/evalc.js b/jstests/evalc.js
new file mode 100644
index 0000000..59c9467
--- /dev/null
+++ b/jstests/evalc.js
@@ -0,0 +1,32 @@
+t = db.jstests_evalc;
+t.drop();
+
+for( i = 0; i < 10; ++i ) {
+ t.save( {i:i} );
+}
+
+// SERVER-1610
+
+function op() {
+ uri = db.runCommand( "whatsmyuri" ).you;
+ printjson( uri );
+ p = db.currentOp().inprog;
+ for ( var i in p ) {
+ var o = p[ i ];
+ if ( o.client == uri ) {
+ print( "found it" );
+ return o.opid;
+ }
+ }
+ return -1;
+}
+
+s = startParallelShell( "print( 'starting forked:' + Date() ); for ( i=0; i<500000; i++ ){ db.currentOp(); } print( 'ending forked:' + Date() ); " )
+
+print( "starting eval: " + Date() )
+for ( i=0; i<20000; i++ ){
+ db.eval( "db.jstests_evalc.count( {i:10} );" );
+}
+print( "end eval: " + Date() )
+
+s();
diff --git a/jstests/geo_circle1.js b/jstests/geo_circle1.js
index 9208511..4fe6c5f 100644
--- a/jstests/geo_circle1.js
+++ b/jstests/geo_circle1.js
@@ -36,7 +36,7 @@ for ( i=0; i<searches.length; i++ ){
//printjson( Array.sort( t.find(q).map( function(z){ return z._id; } ) ) )
assert.eq( correct[i].length , t.find( q ).itcount() , "itcount : " + tojson( searches[i] ) );
- assert.eq( correct[i].length , t.find( q ).itcount() , "count : " + tojson( searches[i] ) );
+ assert.eq( correct[i].length , t.find( q ).count() , "count : " + tojson( searches[i] ) );
assert.gt( correct[i].length * 2 , t.find(q).explain().nscanned , "nscanned : " + tojson( searches[i] ) )
}
diff --git a/jstests/geo_queryoptimizer.js b/jstests/geo_queryoptimizer.js
new file mode 100644
index 0000000..7a438bc
--- /dev/null
+++ b/jstests/geo_queryoptimizer.js
@@ -0,0 +1,27 @@
+
+t = db.geo_qo1;
+t.drop()
+
+t.ensureIndex({loc:"2d"})
+
+t.insert({'issue':0})
+t.insert({'issue':1})
+t.insert({'issue':2})
+t.insert({'issue':2, 'loc':[30.12,-118]})
+t.insert({'issue':1, 'loc':[30.12,-118]})
+t.insert({'issue':0, 'loc':[30.12,-118]})
+
+assert.eq( 6 , t.find().itcount() , "A1" )
+
+assert.eq( 2 , t.find({'issue':0}).itcount() , "A2" )
+
+assert.eq( 1 , t.find({'issue':0,'loc':{$near:[30.12,-118]}}).itcount() , "A3" )
+
+assert.eq( 2 , t.find({'issue':0}).itcount() , "B1" )
+
+assert.eq( 6 , t.find().itcount() , "B2" )
+
+assert.eq( 2 , t.find({'issue':0}).itcount() , "B3" )
+
+assert.eq( 1 , t.find({'issue':0,'loc':{$near:[30.12,-118]}}).itcount() , "B4" )
+
diff --git a/jstests/mr_sort.js b/jstests/mr_sort.js
new file mode 100644
index 0000000..7692062
--- /dev/null
+++ b/jstests/mr_sort.js
@@ -0,0 +1,44 @@
+
+t = db.mr_sort;
+t.drop()
+
+t.ensureIndex( { x : 1 } )
+
+t.insert( { x : 1 } )
+t.insert( { x : 10 } )
+t.insert( { x : 2 } )
+t.insert( { x : 9 } )
+t.insert( { x : 3 } )
+t.insert( { x : 8 } )
+t.insert( { x : 4 } )
+t.insert( { x : 7 } )
+t.insert( { x : 5 } )
+t.insert( { x : 6 } )
+
+m = function(){
+ emit( "a" , this.x )
+}
+
+r = function( k , v ){
+ return Array.sum( v )
+}
+
+
+res = t.mapReduce( m , r );
+x = res.convertToSingleObject();
+res.drop();
+assert.eq( { "a" : 55 } , x , "A1" )
+
+res = t.mapReduce( m , r , { query : { x : { $lt : 3 } } } )
+x = res.convertToSingleObject();
+res.drop();
+assert.eq( { "a" : 3 } , x , "A2" )
+
+res = t.mapReduce( m , r , { sort : { x : 1 } , limit : 2 } );
+x = res.convertToSingleObject();
+res.drop();
+assert.eq( { "a" : 3 } , x , "A3" )
+
+
+
+
diff --git a/jstests/numberlong.js b/jstests/numberlong.js
index 848ef87..1cbbc7a 100644
--- a/jstests/numberlong.js
+++ b/jstests/numberlong.js
@@ -4,50 +4,50 @@ n = new NumberLong( 4 );
assert.eq.automsg( "4", "n" );
assert.eq.automsg( "4", "n.toNumber()" );
assert.eq.automsg( "8", "n + 4" );
-assert.eq.automsg( "'NumberLong( 4 )'", "n.toString()" );
-assert.eq.automsg( "'NumberLong( 4 )'", "tojson( n )" );
+assert.eq.automsg( "'NumberLong(4)'", "n.toString()" );
+assert.eq.automsg( "'NumberLong(4)'", "tojson( n )" );
a = {}
a.a = n;
p = tojson( a );
-assert.eq.automsg( "'{ \"a\" : NumberLong( 4 ) }'", "p" );
+assert.eq.automsg( "'{ \"a\" : NumberLong(4) }'", "p" );
-assert.eq.automsg( "NumberLong( 4 )", "eval( tojson( NumberLong( 4 ) ) )" );
+assert.eq.automsg( "NumberLong(4 )", "eval( tojson( NumberLong( 4 ) ) )" );
assert.eq.automsg( "a", "eval( tojson( a ) )" );
n = new NumberLong( -4 );
assert.eq.automsg( "-4", "n" );
assert.eq.automsg( "-4", "n.toNumber()" );
assert.eq.automsg( "0", "n + 4" );
-assert.eq.automsg( "'NumberLong( -4 )'", "n.toString()" );
-assert.eq.automsg( "'NumberLong( -4 )'", "tojson( n )" );
+assert.eq.automsg( "'NumberLong(-4)'", "n.toString()" );
+assert.eq.automsg( "'NumberLong(-4)'", "tojson( n )" );
a = {}
a.a = n;
p = tojson( a );
-assert.eq.automsg( "'{ \"a\" : NumberLong( -4 ) }'", "p" );
+assert.eq.automsg( "'{ \"a\" : NumberLong(-4) }'", "p" );
// too big to fit in double
n = new NumberLong( "11111111111111111" );
assert.eq.automsg( "11111111111111112", "n.toNumber()" );
assert.eq.automsg( "11111111111111116", "n + 4" );
-assert.eq.automsg( "'NumberLong( \"11111111111111111\" )'", "n.toString()" );
-assert.eq.automsg( "'NumberLong( \"11111111111111111\" )'", "tojson( n )" );
+assert.eq.automsg( "'NumberLong(\"11111111111111111\")'", "n.toString()" );
+assert.eq.automsg( "'NumberLong(\"11111111111111111\")'", "tojson( n )" );
a = {}
a.a = n;
p = tojson( a );
-assert.eq.automsg( "'{ \"a\" : NumberLong( \"11111111111111111\" ) }'", "p" );
+assert.eq.automsg( "'{ \"a\" : NumberLong(\"11111111111111111\") }'", "p" );
-assert.eq.automsg( "NumberLong( '11111111111111111' )", "eval( tojson( NumberLong( '11111111111111111' ) ) )" );
+assert.eq.automsg( "NumberLong('11111111111111111' )", "eval( tojson( NumberLong( '11111111111111111' ) ) )" );
assert.eq.automsg( "a", "eval( tojson( a ) )" );
n = new NumberLong( "-11111111111111111" );
assert.eq.automsg( "-11111111111111112", "n.toNumber()" );
assert.eq.automsg( "-11111111111111108", "n + 4" );
-assert.eq.automsg( "'NumberLong( \"-11111111111111111\" )'", "n.toString()" );
-assert.eq.automsg( "'NumberLong( \"-11111111111111111\" )'", "tojson( n )" );
+assert.eq.automsg( "'NumberLong(\"-11111111111111111\")'", "n.toString()" );
+assert.eq.automsg( "'NumberLong(\"-11111111111111111\")'", "tojson( n )" );
a = {}
a.a = n;
p = tojson( a );
-assert.eq.automsg( "'{ \"a\" : NumberLong( \"-11111111111111111\" ) }'", "p" );
+assert.eq.automsg( "'{ \"a\" : NumberLong(\"-11111111111111111\") }'", "p" );
// parsing
assert.throws.automsg( function() { new NumberLong( "" ); } );
diff --git a/jstests/remove3.js b/jstests/remove3.js
index fe1a754..2a51a6e 100644
--- a/jstests/remove3.js
+++ b/jstests/remove3.js
@@ -14,5 +14,5 @@ assert.eq( 4 , t.count() , "B" );
t.remove( { _id : 5 } );
assert.eq( 3 , t.count() , "C" );
-t.remove( { _id : { $lt : 8 } } , "D" );
+t.remove( { _id : { $lt : 8 } } );
assert.eq( 1 , t.count() , "D" );
diff --git a/jstests/remove_justone.js b/jstests/remove_justone.js
new file mode 100644
index 0000000..e412a13
--- /dev/null
+++ b/jstests/remove_justone.js
@@ -0,0 +1,16 @@
+
+t = db.remove_justone
+t.drop()
+
+t.insert( { x : 1 } )
+t.insert( { x : 1 } )
+t.insert( { x : 1 } )
+t.insert( { x : 1 } )
+
+assert.eq( 4 , t.count() )
+
+t.remove( { x : 1 } , true )
+assert.eq( 3 , t.count() )
+
+t.remove( { x : 1 } )
+assert.eq( 0 , t.count() )
diff --git a/jstests/repl/repl10.js b/jstests/repl/repl10.js
index 67c5db1..cc7cf12 100644
--- a/jstests/repl/repl10.js
+++ b/jstests/repl/repl10.js
@@ -26,13 +26,15 @@ doTest = function( signal ) {
am.save( {i:2} );
assert.eq( 2, am.count() );
sleep( 3000 );
-
- rt.stop( true, signal );
- sleep( 3000 );
assert.eq( 1, s.getDB( baseName ).a.count() );
+ soonCount( 2 );
+
rt.stop();
}
-doTest( 15 ); // SIGTERM
-doTest( 9 ); // SIGKILL
+print("repl10.js dotest(15)");
+doTest(15); // SIGTERM
+print("repl10.js dotest(15)");
+doTest(9); // SIGKILL
+print("repl10.js SUCCESS");
diff --git a/jstests/repl/repl12.js b/jstests/repl/repl12.js
new file mode 100644
index 0000000..586aa53
--- /dev/null
+++ b/jstests/repl/repl12.js
@@ -0,0 +1,47 @@
+// SERVER-1626
+// check for initial sync of multiple db's
+
+function debug( x ) {
+ print( "DEBUG:" + tojson( x ) );
+}
+
+rt = new ReplTest( "repl12tests" );
+
+m = rt.start( true );
+
+usedDBs = []
+
+a = "a"
+for( i = 0; i < 3; ++i ) {
+ usedDBs.push( a )
+ m.getDB( a ).c.save( {} );
+ a += "a";
+}
+m.getDB(a).getLastError();
+
+//print("\n\n\n DB NAMES MASTER:");
+//printjson(m.getDBNames());
+
+var z = 10500;
+print("sleeping " + z + "ms");
+sleep(z);
+
+s = rt.start(false);
+
+function countHave(){
+ var have = 0;
+ for ( var i=0; i<usedDBs.length; i++ ){
+ if ( s.getDB( usedDBs[i] ).c.findOne() )
+ have++;
+ }
+ return have;
+}
+
+assert.soon(
+ function() {
+ var c = countHave();
+ debug( "count: " + c );
+ return c == 3; }
+);
+
+//printjson(s.getDBNames());
diff --git a/jstests/repl/replacePeer2.js b/jstests/repl/replacePeer2.js
index c2983dc..33b054a 100644
--- a/jstests/repl/replacePeer2.js
+++ b/jstests/repl/replacePeer2.js
@@ -44,15 +44,8 @@ doTest = function( signal ) {
checkWrite( rp.master(), rp.slave() );
// allow slave to finish initial sync
- assert.soon(
- function() {
- var res = rp.slave().getDB( "admin" ).runCommand( {replacepeer:1} );
- if ( res.ok == 1 )
- return true;
- printjson( res );
- return false;
- }
- );
+ var res = rp.slave().getDB( "admin" ).runCommand( {replacepeer:1} );
+ assert( res.ok , "replacepeer didn't finish: " + tojson( res ) );
// Should not be saved to slave.
writeOne( rp.master() );
diff --git a/jstests/repl/snapshot2.js b/jstests/repl/snapshot2.js
index d65cad7..60b3531 100644
--- a/jstests/repl/snapshot2.js
+++ b/jstests/repl/snapshot2.js
@@ -1,4 +1,6 @@
-// Test SERVER-623 - starting repl peer from a new snapshot of master
+// Test SERVER-623 - starting repl peer from a new snapshot of master
+
+print("snapshot2.js 1 -----------------------------------------------------------");
ports = allocatePorts( 3 );
@@ -7,21 +9,37 @@ var basePath = "/data/db/" + baseName;
a = new MongodRunner( ports[ 0 ], basePath + "-arbiter" );
l = new MongodRunner( ports[ 1 ], basePath + "-left", "127.0.0.1:" + ports[ 2 ], "127.0.0.1:" + ports[ 0 ] );
-r = new MongodRunner( ports[ 2 ], basePath + "-right", "127.0.0.1:" + ports[ 1 ], "127.0.0.1:" + ports[ 0 ] );
+r = new MongodRunner( ports[ 2 ], basePath + "-right", "127.0.0.1:" + ports[ 1 ], "127.0.0.1:" + ports[ 0 ] );
+
+print("snapshot2.js 2 -----------------------------------------------------------");
-rp = new ReplPair( l, r, a );
-rp.start();
-rp.waitForSteadyState();
+rp = new ReplPair(l, r, a);
+rp.start();
+print("snapshot2.js 3 -----------------------------------------------------------");
+rp.waitForSteadyState();
+
+print("snapshot2.js 4 -----------------------------------------------------------");
big = new Array( 2000 ).toString(); // overflow oplog, so test can't pass supriously
-rp.slave().setSlaveOk();
-for( i = 0; i < 500; ++i ) {
- rp.master().getDB( baseName )[ baseName ].save( { _id: new ObjectId(), i: i, b: big } );
- if ( i % 250 == 249 ) {
- assert.soon( function() { return i+1 == rp.slave().getDB( baseName )[ baseName ].count(); } );
+rp.slave().setSlaveOk();
+print("snapshot2.js 5 -----------------------------------------------------------");
+for (i = 0; i < 500; ++i) {
+ rp.master().getDB( baseName )[ baseName ].save( { _id: new ObjectId(), i: i, b: big } );
+ if (i % 250 == 249) {
+ function p() { return i + 1 == rp.slave().getDB(baseName)[baseName].count(); }
+ try {
+ assert.soon(p);
+ } catch (e) {
+ print("\n\n\nsnapshot2.js\ni+1:" + (i + 1));
+ print("slave count:" + rp.slave().getDB(baseName)[baseName].count());
+ sleep(2000);
+ print(p());
+ throw (e);
+ }
sleep( 10 ); // give master a chance to grab a sync point - have such small oplogs the master log might overflow otherwise
}
-}
+}
+print("snapshot2.js 6 -----------------------------------------------------------");
rp.master().getDB( "admin" ).runCommand( {fsync:1,lock:1} );
leftMaster = ( rp.master().host == rp.left().host );
@@ -47,5 +65,8 @@ assert.eq( 500, rp.slave().getDB( baseName )[ baseName ].count() );
rp.master().getDB( baseName )[ baseName ].save( {i:500} );
assert.soon( function() { return 501 == rp.slave().getDB( baseName )[ baseName ].count(); } );
-assert( !rawMongoProgramOutput().match( /resync/ ) );
-assert( !rawMongoProgramOutput().match( /SyncException/ ) );
\ No newline at end of file
+assert( !rawMongoProgramOutput().match( /resync/ ) );
+assert(!rawMongoProgramOutput().match(/SyncException/));
+
+print("snapshot2.js SUCCESS ----------------");
+
diff --git a/jstests/replsets/randomcommands1.js b/jstests/replsets/randomcommands1.js
new file mode 100644
index 0000000..c451e74
--- /dev/null
+++ b/jstests/replsets/randomcommands1.js
@@ -0,0 +1,29 @@
+
+replTest = new ReplSetTest( {name: 'randomcommands1', nodes: 3} );
+
+nodes = replTest.startSet();
+replTest.initiate();
+
+master = replTest.getMaster();
+slaves = replTest.liveNodes.slaves;
+printjson(replTest.liveNodes);
+
+db = master.getDB("foo")
+t = db.foo
+
+ts = slaves.map( function(z){ z.setSlaveOk(); return z.getDB( "foo" ).foo; } )
+
+t.save({a: 1000});
+t.ensureIndex( { a : 1 } )
+
+db.getLastError( 3 , 30000 )
+
+ts.forEach( function(z){ assert.eq( 2 , z.getIndexKeys().length , "A " + z.getMongo() ); } )
+
+t.reIndex()
+
+db.getLastError( 3 , 30000 )
+ts.forEach( function(z){ assert.eq( 2 , z.getIndexKeys().length , "A " + z.getMongo() ); } )
+
+replTest.stopSet( 15 )
+
diff --git a/jstests/replsets/replset1.js b/jstests/replsets/replset1.js
index 6a18dff..5ac94e7 100644
--- a/jstests/replsets/replset1.js
+++ b/jstests/replsets/replset1.js
@@ -112,4 +112,5 @@ doTest = function( signal ) {
replTest.stopSet( signal );
}
-doTest( 15 );
+doTest( 15 );
+print("replset1.js SUCCESS");
diff --git a/jstests/replsets/replset2.js b/jstests/replsets/replset2.js
index eaa35ee..f18b467 100644
--- a/jstests/replsets/replset2.js
+++ b/jstests/replsets/replset2.js
@@ -1,111 +1,141 @@
-
-doTest = function( signal ) {
-
- // FAILING TEST
- // See below:
-
- // Test replication with getLastError
-
- // Replica set testing API
- // Create a new replica set test. Specify set name and the number of nodes you want.
- var replTest = new ReplSetTest( {name: 'testSet', nodes: 3} );
-
- // call startSet() to start each mongod in the replica set
- // this returns a list of nodes
- var nodes = replTest.startSet();
-
- // Call initiate() to send the replSetInitiate command
- // This will wait for initiation
- replTest.initiate();
-
- // Call getMaster to return a reference to the node that's been
- // elected master.
- var master = replTest.getMaster();
-
- // Wait for replication to a single node
- master.getDB("test").bar.insert({n: 1});
-
- // Wait for initial sync
- replTest.awaitReplication();
-
- var slaves = replTest.liveNodes.slaves;
- slaves.forEach(function(slave) { slave.setSlaveOk(); });
-
- var testDB = "repl-test";
-
- var failed = false;
- var callGetLastError = function(w, timeout, db) {
- var result = master.getDB(db).getLastErrorObj( w , timeout );
- printjson( result );
- if(result['ok'] != 1) {
- print("FAILURE");
- failed = true;
- }
- }
-
- // Test getlasterror with multiple inserts
- // TEST FAILS HERE
- print("**** Try inserting a multiple records -- first insert ****")
- master.getDB(testDB).foo.insert({n: 1});
- master.getDB(testDB).foo.insert({n: 2});
- master.getDB(testDB).foo.insert({n: 3});
- callGetLastError(3, 10000, testDB);
-
- print("**** TEMP 1a ****")
-
- m1 = master.getDB(testDB).foo.findOne({n: 1});
- printjson( m1 );
- assert( m1['n'] == 1 , "Failed to save to master on multiple inserts");
-
- print("**** TEMP 1b ****")
-
- var s0 = slaves[0].getDB(testDB).foo.findOne({n: 1});
- assert( s0['n'] == 1 , "Failed to replicate to slave 0 on multiple inserts");
-
- var s1 = slaves[1].getDB(testDB).foo.findOne({n: 1});
- assert( s1['n'] == 1 , "Failed to replicate to slave 1 on multiple inserts");
-
-
- // Test getlasterror with a simple insert
- print("**** Try inserting a single record ****")
- master.getDB(testDB).dropDatabase();
- master.getDB(testDB).foo.insert({n: 1});
- callGetLastError(3, 10000, testDB);
-
- m1 = master.getDB(testDB).foo.findOne({n: 1});
- printjson( m1 );
- assert( m1['n'] == 1 , "Failed to save to master");
-
-
- var s0 = slaves[0].getDB(testDB).foo.findOne({n: 1});
- assert( s0['n'] == 1 , "Failed to replicate to slave 0");
-
- var s1 = slaves[1].getDB(testDB).foo.findOne({n: 1});
- assert( s1['n'] == 1 , "Failed to replicate to slave 1");
-
-
- // Test getlasterror with large insert
- print("**** Try inserting many records ****")
- bigData = new Array(2000).toString()
- for(var n=0; n<1000; n++) {
- master.getDB(testDB).baz.insert({n: n, data: bigData});
+print("\n\nreplset2.js BEGIN");
+
+doTest = function (signal) {
+
+ // FAILING TEST
+ // See below:
+
+ // Test replication with getLastError
+
+ // Replica set testing API
+ // Create a new replica set test. Specify set name and the number of nodes you want.
+ var replTest = new ReplSetTest({ name: 'testSet', nodes: 3, oplogSize: 5 });
+
+ // call startSet() to start each mongod in the replica set
+ // this returns a list of nodes
+ var nodes = replTest.startSet();
+
+ // Call initiate() to send the replSetInitiate command
+ // This will wait for initiation
+ replTest.initiate();
+
+ var testDB = "repl-test";
+
+ // Call getMaster to return a reference to the node that's been
+ // elected master.
+ var master = replTest.getMaster();
+
+ // Wait for replication to a single node
+ master.getDB(testDB).bar.insert({ n: 1 });
+
+ // Wait for initial sync
+ replTest.awaitReplication();
+
+ var slaves = replTest.liveNodes.slaves;
+ slaves.forEach(function (slave) { slave.setSlaveOk(); });
+
+ var failed = false;
+ var callGetLastError = function (w, timeout, db) {
+ try {
+ var result = master.getDB(db).getLastErrorObj(w, timeout);
+ print("replset2.js getLastError result: " + tojson(result));
+ if (result['ok'] != 1) {
+ print("replset2.js FAILURE getlasterror not ok");
+ failed = true;
+ }
+ }
+ catch (e) {
+ print("\nreplset2.js exception in getLastError: " + e + '\n');
+ throw e;
+ }
+ }
+
+ // Test getlasterror with multiple inserts
+ // TEST FAILS HEREg
+ print("\n\nreplset2.js **** Try inserting a multiple records -- first insert ****")
+
+ printjson(master.getDB("admin").runCommand("replSetGetStatus"));
+
+ master.getDB(testDB).foo.insert({ n: 1 });
+ master.getDB(testDB).foo.insert({ n: 2 });
+ master.getDB(testDB).foo.insert({ n: 3 });
+
+ print("\nreplset2.js **** TEMP 1 ****")
+
+ printjson(master.getDB("admin").runCommand("replSetGetStatus"));
+
+ callGetLastError(3, 25000, testDB);
+
+ print("replset2.js **** TEMP 1a ****")
+
+ m1 = master.getDB(testDB).foo.findOne({ n: 1 });
+ printjson(m1);
+ assert(m1['n'] == 1, "replset2.js Failed to save to master on multiple inserts");
+
+ print("replset2.js **** TEMP 1b ****")
+
+ var s0 = slaves[0].getDB(testDB).foo.findOne({ n: 1 });
+ assert(s0['n'] == 1, "replset2.js Failed to replicate to slave 0 on multiple inserts");
+
+ var s1 = slaves[1].getDB(testDB).foo.findOne({ n: 1 });
+ assert(s1['n'] == 1, "replset2.js Failed to replicate to slave 1 on multiple inserts");
+
+ // Test getlasterror with a simple insert
+ print("replset2.js **** Try inserting a single record ****")
+ master.getDB(testDB).dropDatabase();
+ master.getDB(testDB).foo.insert({ n: 1 });
+ callGetLastError(3, 10000, testDB);
+
+ m1 = master.getDB(testDB).foo.findOne({ n: 1 });
+ printjson(m1);
+ assert(m1['n'] == 1, "replset2.js Failed to save to master");
+
+ s0 = slaves[0].getDB(testDB).foo.findOne({ n: 1 });
+ assert(s0['n'] == 1, "replset2.js Failed to replicate to slave 0");
+
+ s1 = slaves[1].getDB(testDB).foo.findOne({ n: 1 });
+ assert(s1['n'] == 1, "replset2.js Failed to replicate to slave 1");
+
+ // Test getlasterror with large insert
+ print("replset2.js **** Try inserting many records ****")
+ try {
+ bigData = new Array(2000).toString()
+ for (var n = 0; n < 1000; n++) {
+ master.getDB(testDB).baz.insert({ n: n, data: bigData });
+ }
+ callGetLastError(3, 60000, testDB);
+
+ print("replset2.js **** V1 ")
+
+ var verifyReplication = function (nodeName, collection) {
+ data = collection.findOne({ n: 1 });
+ assert(data['n'] == 1, "replset2.js Failed to save to " + nodeName);
+ data = collection.findOne({ n: 999 });
+ assert(data['n'] == 999, "replset2.js Failed to save to " + nodeName);
+ }
+
+ print("replset2.js **** V2 ")
+
+ verifyReplication("master", master.getDB(testDB).baz);
+ verifyReplication("slave 0", slaves[0].getDB(testDB).baz);
+ verifyReplication("slave 1", slaves[1].getDB(testDB).baz);
+
+ assert(failed == false, "replset2.js Replication with getLastError failed. See errors.");
}
- callGetLastError(3, 60000, testDB);
-
- var verifyReplication = function(nodeName, collection) {
- data = collection.findOne({n: 1});
- assert( data['n'] == 1 , "Failed to save to " + nodeName);
- data = collection.findOne({n: 999});
- assert( data['n'] == 999 , "Failed to save to " + nodeName);
+ catch(e) {
+ print("ERROR: " + e);
+ print("Master oplog findOne:");
+ printjson(master.getDB("local").oplog.rs.find().sort({"$natural": -1}).limit(1).next());
+ print("Slave 0 oplog findOne:");
+ printjson(slaves[0].getDB("local").oplog.rs.find().sort({"$natural": -1}).limit(1).next());
+ print("Slave 1 oplog findOne:");
+ printjson(slaves[1].getDB("local").oplog.rs.find().sort({"$natural": -1}).limit(1).next());
}
- verifyReplication("master", master.getDB(testDB).baz);
- verifyReplication("slave 0", slaves[0].getDB(testDB).baz);
- verifyReplication("slave 1", slaves[1].getDB(testDB).baz);
-
- assert( failed == false, "Replication with getLastError failed. See errors." );
-
- replTest.stopSet( signal );
+
+ replTest.stopSet(signal);
}
-doTest( 15 );
+doTest( 15 );
+
+print("\nreplset2.js SUCCESS\n");
diff --git a/jstests/replsets/replset4.js b/jstests/replsets/replset4.js
index 4f6c454..9b1f2e9 100644
--- a/jstests/replsets/replset4.js
+++ b/jstests/replsets/replset4.js
@@ -1,29 +1,44 @@
-doTest = function( signal ) {
-
- // Test orphaned master steps down
- var replTest = new ReplSetTest( {name: 'testSet', nodes: 3} );
-
- replTest.startSet();
- replTest.initiate();
-
- var master = replTest.getMaster();
-
- // Kill both slaves, simulating a network partition
- var slaves = replTest.liveNodes.slaves;
- for(var i=0; i<slaves.length; i++) {
- var slave_id = replTest.getNodeId(slaves[i]);
- replTest.stop( slave_id );
- }
-
- var result = master.getDB("admin").runCommand({ismaster: 1});
- printjson( result );
- assert.soon(function() {
- var result = master.getDB("admin").runCommand({ismaster: 1});
- printjson( result );
- return (result['ok'] == 1 && result['ismaster'] == false);
- }, "Master fails to step down when orphaned.");
-
- replTest.stopSet( signal );
-}
-
+doTest = function (signal) {
+
+ // Test orphaned master steps down
+ var replTest = new ReplSetTest({ name: 'testSet', nodes: 3 });
+
+ replTest.startSet();
+ replTest.initiate();
+
+ var master = replTest.getMaster();
+
+ // Kill both slaves, simulating a network partition
+ var slaves = replTest.liveNodes.slaves;
+ for (var i = 0; i < slaves.length; i++) {
+ var slave_id = replTest.getNodeId(slaves[i]);
+ replTest.stop(slave_id);
+ }
+
+ print("replset4.js 1");
+
+ var result = master.getDB("admin").runCommand({ ismaster: 1 });
+
+ print("replset4.js 2");
+ printjson(result);
+
+ assert.soon(
+ function () {
+ try {
+ var result = master.getDB("admin").runCommand({ ismaster: 1 });
+ return (result['ok'] == 1 && result['ismaster'] == false);
+ } catch (e) {
+ print("replset4.js caught " + e);
+ return false;
+ }
+ },
+ "Master fails to step down when orphaned."
+ );
+
+ print("replset4.js worked, stopping");
+ replTest.stopSet(signal);
+}
+
+print("replset4.js");
doTest( 15 );
+print("replset4.js SUCCESS");
diff --git a/jstests/replsets/replset5.js b/jstests/replsets/replset5.js
new file mode 100644
index 0000000..fe1761e
--- /dev/null
+++ b/jstests/replsets/replset5.js
@@ -0,0 +1,72 @@
+// rs test getlasterrordefaults
+
+doTest = function (signal) {
+
+ // Test getLastError defaults
+ var replTest = new ReplSetTest({ name: 'testSet', nodes: 3 });
+
+ var nodes = replTest.startSet();
+
+ // Initiate set with default for getLastError
+ var config = replTest.getReplSetConfig();
+ config.settings = {};
+ config.settings.getLastErrorDefaults = { 'w': 3, 'wtimeout': 20000 };
+
+ replTest.initiate(config);
+
+ //
+ var master = replTest.getMaster();
+ replTest.awaitSecondaryNodes();
+ var testDB = "foo";
+
+ // Initial replication
+ master.getDB("barDB").bar.save({ a: 1 });
+ replTest.awaitReplication();
+
+ // These writes should be replicated immediately
+ master.getDB(testDB).foo.insert({ n: 1 });
+ master.getDB(testDB).foo.insert({ n: 2 });
+ master.getDB(testDB).foo.insert({ n: 3 });
+
+ // *** NOTE ***: The default doesn't seem to be propogating.
+ // When I run getlasterror with no defaults, the slaves don't have the data:
+ // These getlasterror commands can be run individually to verify this.
+ //master.getDB("admin").runCommand({ getlasterror: 1, w: 3, wtimeout: 20000 });
+ master.getDB("admin").runCommand({getlasterror: 1});
+
+ var slaves = replTest.liveNodes.slaves;
+ slaves[0].setSlaveOk();
+ slaves[1].setSlaveOk();
+
+ print("Testing slave counts");
+
+ // These should all have 3 documents, but they don't always.
+ var master1count = master.getDB(testDB).foo.count();
+ assert( master1count == 3, "Master has " + master1count + " of 3 documents!");
+
+ var slave0count = slaves[0].getDB(testDB).foo.count();
+ assert( slave0count == 3, "Slave 0 has " + slave0count + " of 3 documents!");
+
+ var slave1count = slaves[1].getDB(testDB).foo.count();
+ assert( slave1count == 3, "Slave 1 has " + slave1count + " of 3 documents!");
+
+ print("Testing slave 0");
+
+ var s0 = slaves[0].getDB(testDB).foo.find();
+ assert(s0.next()['n']);
+ assert(s0.next()['n']);
+ assert(s0.next()['n']);
+
+ print("Testing slave 1");
+
+ var s1 = slaves[1].getDB(testDB).foo.find();
+ assert(s1.next()['n']);
+ assert(s1.next()['n']);
+ assert(s1.next()['n']);
+
+ // End test
+ replTest.stopSet(signal);
+}
+
+doTest( 15 );
+print("replset5.js success");
diff --git a/jstests/replsets/replset_remove_node.js b/jstests/replsets/replset_remove_node.js
index e06a951..fcb754c 100644
--- a/jstests/replsets/replset_remove_node.js
+++ b/jstests/replsets/replset_remove_node.js
@@ -27,8 +27,13 @@ doTest = function( signal ) {
// Remove that node from the configuration
replTest.remove( slaveId );
- // Then, reinitiate
- replTest.reInitiate();
+ // Now, re-initiate
+ var c = master.getDB("local")['system.replset'].findOne();
+ var config = replTest.getReplSetConfig();
+ config.version = c.version + 1;
+ config.members = [ { "_id" : 0, "host" : replTest.host + ":31000" },
+ { "_id" : 2, "host" : replTest.host + ":31002" } ]
+ replTest.initiate( config , 'replSetReconfig' );
// Make sure that a new master comes up
master = replTest.getMaster();
@@ -52,6 +57,8 @@ doTest = function( signal ) {
stat = slaves[0].getDB("admin").runCommand({replSetGetStatus: 1});
return stat.members.length == 2;
}, "Wrong number of members", 60000);
-}
-
-doTest( 15 );
+}
+
+print("replset_remove_node.js");
+doTest(15);
+print("replset_remove_node SUCCESS");
diff --git a/jstests/replsets/rollback.js b/jstests/replsets/rollback.js
index f072d61..8840371 100644
--- a/jstests/replsets/rollback.js
+++ b/jstests/replsets/rollback.js
@@ -28,7 +28,7 @@ function wait(f) {
var n = 0;
while (!f()) {
if( n % 4 == 0 )
- print("waiting " + w);
+ print("rollback.js waiting " + w);
if (++n == 4) {
print("" + f);
}
@@ -72,6 +72,31 @@ doTest = function (signal) {
// Wait for initial replication
var a = a_conn.getDB("foo");
var b = b_conn.getDB("foo");
+
+ /* force the oplog to roll */
+ if (new Date() % 2 == 0) {
+ print("ROLLING OPLOG AS PART OF TEST (we only do this sometimes)");
+ var pass = 1;
+ var first = a.getSisterDB("local").oplog.rs.find().sort({ $natural: 1 }).limit(1)[0];
+ a.roll.insert({ x: 1 });
+ while (1) {
+ for (var i = 0; i < 10000; i++)
+ a.roll.update({}, { $inc: { x: 1} });
+ var op = a.getSisterDB("local").oplog.rs.find().sort({ $natural: 1 }).limit(1)[0];
+ if (tojson(op.h) != tojson(first.h)) {
+ printjson(op);
+ printjson(first);
+ break;
+ }
+ pass++;
+ a.getLastError(2); // unlikely secondary isn't keeping up, but let's avoid possible intermittent issues with that.
+ }
+ print("PASSES FOR OPLOG ROLL: " + pass);
+ }
+ else {
+ print("NO ROLL");
+ }
+
a.bar.insert({ q: 1, a: "foo" });
a.bar.insert({ q: 2, a: "foo", x: 1 });
a.bar.insert({ q: 3, bb: 9, a: "foo" });
@@ -122,8 +147,9 @@ doTest = function (signal) {
friendlyEqual(a.bar.find().sort({ _id: 1 }).toArray(), b.bar.find().sort({ _id: 1 }).toArray(), "server data sets do not match");
- pause("SUCCESS");
+ pause("rollback.js SUCCESS");
replTest.stopSet(signal);
}
+print("rollback.js");
doTest( 15 );
diff --git a/jstests/replsets/rollback2.js b/jstests/replsets/rollback2.js
index f9c48ff..483d221 100644
--- a/jstests/replsets/rollback2.js
+++ b/jstests/replsets/rollback2.js
@@ -27,8 +27,8 @@ function wait(f) {
w++;
var n = 0;
while (!f()) {
- if( n % 4 == 0 )
- print("waiting " + w);
+ if (n % 4 == 0)
+ print("rollback2.js waiting " + w);
if (++n == 4) {
print("" + f);
}
@@ -192,8 +192,10 @@ doTest = function (signal) {
assert( dbs_match(a,b), "server data sets do not match after rollback, something is wrong");
- pause("SUCCESS");
+ pause("rollback2.js SUCCESS");
replTest.stopSet(signal);
}
+print("rollback2.js");
+
doTest( 15 );
diff --git a/jstests/replsets/rollback3.js b/jstests/replsets/rollback3.js
new file mode 100755
index 0000000..5c2f2f1
--- /dev/null
+++ b/jstests/replsets/rollback3.js
@@ -0,0 +1,224 @@
+// test rollback in replica sets
+
+// try running as :
+//
+// mongo --nodb rollback.js | tee out | grep -v ^m31
+//
+
+var debugging = 0;
+
+function pause(s) {
+ print(s);
+ while (debugging) {
+ sleep(3000);
+ print(s);
+ }
+}
+
+function deb(obj) {
+ if( debugging ) {
+ print("\n\n\n" + obj + "\n\n");
+ }
+}
+
+w = 0;
+
+function wait(f) {
+ w++;
+ var n = 0;
+ while (!f()) {
+ if (n % 4 == 0)
+ print("rollback3.js waiting " + w);
+ if (++n == 4) {
+ print("" + f);
+ }
+ if (n == 200) {
+ print("rollback3.js failing waited too long");
+ throw "wait error";
+ }
+ sleep(1000);
+ }
+}
+
+function dbs_match(a, b) {
+ print("dbs_match");
+
+ var ac = a.system.namespaces.find().sort({name:1}).toArray();
+ var bc = b.system.namespaces.find().sort({name:1}).toArray();
+ if (!friendlyEqual(ac, bc)) {
+ print("dbs_match: namespaces don't match");
+ print("\n\n");
+ printjson(ac);
+ print("\n\n");
+ printjson(bc);
+ print("\n\n");
+ return false;
+ }
+
+ var c = a.getCollectionNames();
+ for( var i in c ) {
+ print("checking " + c[i]);
+ // system.indexes doesn't have _id so the more involved sort here:
+ if (!friendlyEqual(a[c[i]].find().sort({ _id: 1, ns:1, name:1 }).toArray(), b[c[i]].find().sort({ _id: 1, ns:1,name:1 }).toArray())) {
+ print("dbs_match: collections don't match " + c[i]);
+ if (a[c[i]].count() < 12) {
+ printjson(a[c[i]].find().sort({ _id: 1 }).toArray());
+ printjson(b[c[i]].find().sort({ _id: 1 }).toArray());
+ }
+ return false;
+ }
+ }
+ return true;
+}
+
+/* these writes will be initial data and replicate everywhere. */
+function doInitialWrites(db) {
+ db.b.insert({ x: 1 });
+ db.b.ensureIndex({ x: 1 });
+ db.oldname.insert({ y: 1 });
+ db.oldname.insert({ y: 2 });
+ db.oldname.ensureIndex({ y: 1 },true);
+ t = db.bar;
+ t.insert({ q:0});
+ t.insert({ q: 1, a: "foo" });
+ t.insert({ q: 2, a: "foo", x: 1 });
+ t.insert({ q: 3, bb: 9, a: "foo" });
+ t.insert({ q: 40333333, a: 1 });
+ for (var i = 0; i < 200; i++) t.insert({ i: i });
+ t.insert({ q: 40, a: 2 });
+ t.insert({ q: 70, txt: 'willremove' });
+
+ db.createCollection("kap", { capped: true, size: 5000 });
+ db.kap.insert({ foo: 1 })
+}
+
+/* these writes on one primary only and will be rolled back. */
+function doItemsToRollBack(db) {
+ t = db.bar;
+ t.insert({ q: 4 });
+ t.update({ q: 3 }, { q: 3, rb: true });
+
+ t.remove({ q: 40 }); // multi remove test
+
+ t.update({ q: 2 }, { q: 39, rb: true });
+
+ // rolling back a delete will involve reinserting the item(s)
+ t.remove({ q: 1 });
+
+ t.update({ q: 0 }, { $inc: { y: 1} });
+
+ db.kap.insert({ foo: 2 })
+ db.kap2.insert({ foo: 2 })
+
+ // create a collection (need to roll back the whole thing)
+ db.newcoll.insert({ a: true });
+
+ // create a new empty collection (need to roll back the whole thing)
+ db.createCollection("abc");
+
+ // drop a collection - we'll need all its data back!
+ t.drop();
+
+ // drop an index - verify it comes back
+ db.b.dropIndexes();
+
+ // two to see if we transitively rollback?
+ db.oldname.renameCollection("newname");
+ db.newname.renameCollection("fooname");
+
+ assert(db.fooname.count() > 0, "count rename");
+
+ // test roll back (drop) a whole database
+ abc = db.getSisterDB("abc");
+ abc.foo.insert({ x: 1 });
+ abc.bar.insert({ y: 999 });
+
+ // test making and dropping a database
+ //mkd = db.getSisterDB("mkd");
+ //mkd.c.insert({ y: 99 });
+ //mkd.dropDatabase();
+}
+
+function doWritesToKeep2(db) {
+ t = db.bar;
+ t.insert({ txt: 'foo' });
+ t.remove({ q: 70 });
+ t.update({ q: 0 }, { $inc: { y: 33} });
+}
+
+doTest = function (signal) {
+
+ var replTest = new ReplSetTest({ name: 'unicomplex', nodes: 3 });
+ var nodes = replTest.nodeList();
+ //print(tojson(nodes));
+
+ var conns = replTest.startSet();
+ var r = replTest.initiate({ "_id": "unicomplex",
+ "members": [
+ { "_id": 0, "host": nodes[0] },
+ { "_id": 1, "host": nodes[1] },
+ { "_id": 2, "host": nodes[2], arbiterOnly: true}]
+ });
+
+ // Make sure we have a master
+ var master = replTest.getMaster();
+ a_conn = conns[0];
+ A = a_conn.getDB("admin");
+ b_conn = conns[1];
+ a_conn.setSlaveOk();
+ b_conn.setSlaveOk();
+ B = b_conn.getDB("admin");
+ assert(master == conns[0], "conns[0] assumed to be master");
+ assert(a_conn == master);
+
+ //deb(master);
+
+ // Make sure we have an arbiter
+ assert.soon(function () {
+ res = conns[2].getDB("admin").runCommand({ replSetGetStatus: 1 });
+ return res.myState == 7;
+ }, "Arbiter failed to initialize.");
+
+ // Wait for initial replication
+ var a = a_conn.getDB("foo");
+ var b = b_conn.getDB("foo");
+ doInitialWrites(a);
+
+ // wait for secondary to get this data
+ wait(function () { return b.bar.count() == a.bar.count(); });
+
+ A.runCommand({ replSetTest: 1, blind: true });
+ wait(function () { return B.isMaster().ismaster; });
+
+ doItemsToRollBack(b);
+
+ // a should not have the new data as it was in blind state.
+ B.runCommand({ replSetTest: 1, blind: true });
+ A.runCommand({ replSetTest: 1, blind: false });
+ wait(function () { return !B.isMaster().ismaster; });
+ wait(function () { return A.isMaster().ismaster; });
+
+ assert(a.bar.count() >= 1, "count check");
+ doWritesToKeep2(a);
+
+ // A is 1 2 3 7 8
+ // B is 1 2 3 4 5 6
+
+ // bring B back online
+ // as A is primary, B will roll back and then catch up
+ B.runCommand({ replSetTest: 1, blind: false });
+
+ wait(function () { return B.isMaster().ismaster || B.isMaster().secondary; });
+
+ // everyone is up here...
+ assert(A.isMaster().ismaster || A.isMaster().secondary, "A up");
+ assert(B.isMaster().ismaster || B.isMaster().secondary, "B up");
+
+ assert( dbs_match(a,b), "server data sets do not match after rollback, something is wrong");
+
+ pause("rollback3.js SUCCESS");
+ replTest.stopSet(signal);
+}
+
+print("rollback3.js");
+doTest( 15 );
diff --git a/jstests/replsets/sync1.js b/jstests/replsets/sync1.js
index 0f7754e..e60d128 100644
--- a/jstests/replsets/sync1.js
+++ b/jstests/replsets/sync1.js
@@ -2,8 +2,10 @@
var debugging=0;
+w = 0;
+
function pause(s) {
- // for debugging just to keep processes running
+ // for debugging just to keep processes running
print("\nsync1.js: " + s);
if (debugging) {
while (1) {
@@ -11,180 +13,197 @@ function pause(s) {
sleep(4000);
}
}
-}
-
-doTest = function (signal) {
-
+}
+
+doTest = function (signal) {
+
var replTest = new ReplSetTest({ name: 'testSet', nodes: 3 });
var nodes = replTest.startSet({ oplogSize: "40" });
-
- sleep(5000);
-
- print("\nsync1.js ********************************************************************** part 0");
- replTest.initiate();
-
+
+ sleep(5000);
+
+ print("\nsync1.js ********************************************************************** part 0");
+ replTest.initiate();
+
// get master
- print("\nsync1.js ********************************************************************** part 1");
- var master = replTest.getMaster();
- print("\nsync1.js ********************************************************************** part 2");
- var dbs = [master.getDB("foo")];
-
- for (var i in nodes) {
- if (nodes[i] + "" == master + "") {
- continue;
- }
- dbs.push(nodes[i].getDB("foo"));
- nodes[i].setSlaveOk();
- }
-
- print("\nsync1.js ********************************************************************** part 3");
- dbs[0].bar.drop();
-
- print("\nsync1.js ********************************************************************** part 4");
+ print("\nsync1.js ********************************************************************** part 1");
+ var master = replTest.getMaster();
+ print("\nsync1.js ********************************************************************** part 2");
+ var dbs = [master.getDB("foo")];
+
+ for (var i in nodes) {
+ if (nodes[i] + "" == master + "") {
+ continue;
+ }
+ dbs.push(nodes[i].getDB("foo"));
+ nodes[i].setSlaveOk();
+ }
+
+ print("\nsync1.js ********************************************************************** part 3");
+ dbs[0].bar.drop();
+
+ print("\nsync1.js ********************************************************************** part 4");
// slow things down a bit
- dbs[0].bar.ensureIndex({ x: 1 });
- dbs[0].bar.ensureIndex({ y: 1 });
- dbs[0].bar.ensureIndex({ z: 1 });
- dbs[0].bar.ensureIndex({ w: 1 });
-
- var ok = false;
- var inserts = 100000;
-
- print("\nsync1.js ********************************************************************** part 5");
-
- for (var i = 0; i < inserts; i++) {
- dbs[0].bar.insert({ x: "foo" + i, y: "bar" + i, z: i, w: "biz baz bar boo" });
- }
-
- var status;
- do {
- sleep(1000);
- status = dbs[0].getSisterDB("admin").runCommand({replSetGetStatus : 1});
- } while(status.members[1].state != 2 && status.members[2].state != 2);
-
- print("\nsync1.js ********************************************************************** part 6");
- dbs[0].getSisterDB("admin").runCommand({ replSetTest: 1, blind: true });
-
- print("\nsync1.js ********************************************************************** part 7");
-
- sleep(5000);
-
- // yay! there are out-of-date nodes
- var max1;
- var max2;
- var count = 0;
- while( 1 ) {
- try {
- max1 = dbs[1].bar.find().sort({ z: -1 }).limit(1).next();
- max2 = dbs[2].bar.find().sort({ z: -1 }).limit(1).next();
- }
- catch(e) {
- print("\nsync1.js couldn't get max1/max2; retrying " + e);
- sleep(2000);
- count++;
- if (count == 50) {
- assert(false, "errored out 50 times");
- }
- continue;
- }
- break;
- }
-
- print("\nsync1.js ********************************************************************** part 8");
-
- if (max1.z == (inserts-1) && max2.z == (inserts-1)) {
- print("\nsync1.js try increasing # if inserts and running again");
- replTest.stopSet(signal);
- return;
- }
-
- // wait for a new master to be elected
- sleep(5000);
-
- // figure out who is master now
- var newMaster = replTest.getMaster();
-
- print("\nsync1.js ********************************************************************** part 9");
-
- print("\nsync1.js \nsync1.js ********************************************************************** part 9 **********************************************");
+ dbs[0].bar.ensureIndex({ x: 1 });
+ dbs[0].bar.ensureIndex({ y: 1 });
+ dbs[0].bar.ensureIndex({ z: 1 });
+ dbs[0].bar.ensureIndex({ w: 1 });
+
+ var ok = false;
+ var inserts = 100000;
+
+ print("\nsync1.js ********************************************************************** part 5");
+
+ for (var i = 0; i < inserts; i++) {
+ dbs[0].bar.insert({ x: "foo" + i, y: "bar" + i, z: i, w: "biz baz bar boo" });
+ }
+
+ var status;
+ do {
+ sleep(1000);
+ status = dbs[0].getSisterDB("admin").runCommand({ replSetGetStatus: 1 });
+ } while (status.members[1].state != 2 && status.members[2].state != 2);
+
+ print("\nsync1.js ********************************************************************** part 6");
+ dbs[0].getSisterDB("admin").runCommand({ replSetTest: 1, blind: true });
+
+ print("\nsync1.js ********************************************************************** part 7");
+
+ sleep(5000);
+
+ var max1;
+ var max2;
+ var count = 0;
+ while (1) {
+ try {
+ max1 = dbs[1].bar.find().sort({ z: -1 }).limit(1).next();
+ max2 = dbs[2].bar.find().sort({ z: -1 }).limit(1).next();
+ }
+ catch (e) {
+ print("\nsync1.js couldn't get max1/max2; retrying " + e);
+ sleep(2000);
+ count++;
+ if (count == 50) {
+ assert(false, "errored out 50 times");
+ }
+ continue;
+ }
+ break;
+ }
+
+ // wait for a new master to be elected
+ sleep(5000);
+ var newMaster;
+
+ print("\nsync1.js ********************************************************************** part 9");
+
+ for (var q = 0; q < 10; q++) {
+ // figure out who is master now
+ newMaster = replTest.getMaster();
+ if (newMaster + "" != master + "")
+ break;
+ sleep(2000);
+ if (q > 6) print("sync1.js zzz....");
+ }
+
assert(newMaster + "" != master + "", "new master is " + newMaster + ", old master was " + master);
+
print("\nsync1.js new master is " + newMaster + ", old master was " + master);
-
- count = 0;
- do {
- try {
- max1 = dbs[1].bar.find().sort({ z: -1 }).limit(1).next();
- max2 = dbs[2].bar.find().sort({ z: -1 }).limit(1).next();
- }
- catch( e ) {
- print("\nsync1.js: exception querying; will sleep and try again " + e);
- sleep(2000);
- continue;
- }
-
- print("\nsync1.js waiting for match " + count + " " + Date() + " z[1]:" + max1.z + " z[2]:" + max2.z);
-
+
+ print("\nsync1.js ********************************************************************** part 9.1");
+
+ count = 0;
+ countExceptions = 0;
+ do {
+ try {
+ max1 = dbs[1].bar.find().sort({ z: -1 }).limit(1).next();
+ max2 = dbs[2].bar.find().sort({ z: -1 }).limit(1).next();
+ }
+ catch (e) {
+ if (countExceptions++ > 300) {
+ print("dbs[1]:");
+ try {
+ printjson(dbs[1].isMaster());
+ printjson(dbs[1].bar.count());
+ }
+ catch (e) { print(e); }
+ print("dbs[2]:");
+ try {
+ printjson(dbs[2].isMaster());
+ printjson(dbs[2].bar.count());
+ }
+ catch (e) { print(e); }
+ assert(false, "sync1.js too many exceptions, failing");
+ }
+ print("\nsync1.js: exception querying; will sleep and try again " + e);
+ sleep(3000);
+ continue;
+ }
+
+ print("\nsync1.js waiting for match " + count + " " + Date() + " z[1]:" + max1.z + " z[2]:" + max2.z);
+
// printjson(max1);
// printjson(max2);
-
- sleep(2000);
-
- count++;
- if (count == 100) {
- pause("fail phase 1");
- assert(false, "replsets/\nsync1.js fails timing out");
- replTest.stopSet(signal);
- return;
- }
- } while (max1.z != max2.z);
-
- // okay, now they're caught up. We have a max:
- var max = max1.z;
-
- print("\nsync1.js ********************************************************************** part 10");
-
- // now, let's see if rollback works
- var result = dbs[0].getSisterDB("admin").runCommand({ replSetTest: 1, blind: false });
- dbs[0].getMongo().setSlaveOk();
-
- printjson(result);
- sleep(5000);
-
- // FAIL! This never resyncs
- // now this should resync
- print("\nsync1.js ********************************************************************** part 11");
- var max0 = null;
- count = 0;
- do {
- try {
- max0 = dbs[0].bar.find().sort({ z: -1 }).limit(1).next();
- }
- catch(e) {
- print("\nsync1.js part 11 exception on bar.find() will sleep and try again " + e);
- sleep(2000);
- continue;
- }
-
- printjson(max);
- printjson(max0);
- print("\nsync1.js part 11 waiting for match " + count + " " + Date() + " z[0]:" + max0.z + " z:" + max);
-
- sleep(2000);
-
- count++;
- if (count == 100) {
- pause("fail part 11");
- assert(false, "replsets/\nsync1.js fails timing out");
- replTest.stopSet(signal);
- return;
- }
- print("||||| count:" + count);
- printjson(max0);
- } while (! max0 || max0.z != max);
-
- print("\nsync1.js ********************************************************************** part 12");
- pause("\nsync1.js success");
- replTest.stopSet(signal);
+
+ sleep(2000);
+
+ count++;
+ if (count == 100) {
+ pause("fail phase 1");
+ assert(false, "replsets/\nsync1.js fails timing out");
+ replTest.stopSet(signal);
+ return;
+ }
+ } while (max1.z != max2.z);
+
+ // okay, now they're caught up. We have a max: max1.z
+
+ print("\nsync1.js ********************************************************************** part 10");
+
+ // now, let's see if rollback works
+ var result = dbs[0].getSisterDB("admin").runCommand({ replSetTest: 1, blind: false });
+ dbs[0].getMongo().setSlaveOk();
+
+ printjson(result);
+ sleep(5000);
+
+ // now this should resync
+ print("\nsync1.js ********************************************************************** part 11");
+ var max0 = null;
+ count = 0;
+ do {
+ try {
+ max0 = dbs[0].bar.find().sort({ z: -1 }).limit(1).next();
+ max1 = dbs[1].bar.find().sort({ z: -1 }).limit(1).next();
+ }
+ catch (e) {
+ print("\nsync1.js part 11 exception on bar.find() will sleep and try again " + e);
+ sleep(2000);
+ continue;
+ }
+
+ print("part 11");
+ if (max0) {
+ print("max0.z:" + max0.z);
+ print("max1.z:" + max1.z);
+ }
+
+ sleep(2000);
+
+ count++;
+ if (count == 100) {
+ pause("FAIL part 11");
+ assert(false, "replsets/\nsync1.js fails timing out");
+ replTest.stopSet(signal);
+ return;
+ }
+ //print("||||| count:" + count);
+ //printjson(max0);
+ } while (!max0 || max0.z != max1.z);
+
+ print("\nsync1.js ********************************************************************** part 12");
+ pause("\nsync1.js success");
+ replTest.stopSet(signal);
}
if( 1 || debugging ) {
diff --git a/jstests/replsets/two_initsync.js b/jstests/replsets/two_initsync.js
new file mode 100755
index 0000000..6ae8475
--- /dev/null
+++ b/jstests/replsets/two_initsync.js
@@ -0,0 +1,93 @@
+// test initial sync failing
+
+// try running as :
+//
+// mongo --nodb two_initsync.js | tee out | grep -v ^m31
+//
+
+var debugging = 0;
+
+function pause(s) {
+ print(s);
+ while (debugging) {
+ sleep(3000);
+ print(s);
+ }
+}
+
+function deb(obj) {
+ if( debugging ) {
+ print("\n\n\n" + obj + "\n\n");
+ }
+}
+
+w = 0;
+
+function wait(f) {
+ w++;
+ var n = 0;
+ while (!f()) {
+ if( n % 4 == 0 )
+ print("twoinitsync waiting " + w);
+ if (++n == 4) {
+ print("" + f);
+ }
+ sleep(1000);
+ }
+}
+
+doTest = function (signal) {
+ var replTest = new ReplSetTest({ name: 'testSet', nodes: 0 });
+
+ var first = replTest.add();
+
+ // Initiate replica set
+ assert.soon(function () {
+ var res = first.getDB("admin").runCommand({ replSetInitiate: null });
+ return res['ok'] == 1;
+ });
+
+ // Get status
+ assert.soon(function () {
+ var result = first.getDB("admin").runCommand({ replSetGetStatus: true });
+ return result['ok'] == 1;
+ });
+
+ var a = replTest.getMaster().getDB("two");
+ for (var i = 0; i < 20000; i++)
+ a.coll.insert({ i: i, s: "a b" });
+
+ // Start a second node
+ var second = replTest.add();
+
+ // Add the second node.
+ // This runs the equivalent of rs.add(newNode);
+ replTest.reInitiate();
+
+ var b = second.getDB("admin");
+
+ // attempt to interfere with the initial sync
+ b._adminCommand({ replSetTest: 1, forceInitialSyncFailure: 1 });
+
+ // wait(function () { return a._adminCommand("replSetGetStatus").members.length == 2; });
+
+ wait(function () { return b.isMaster().secondary || b.isMaster().ismaster; });
+
+ print("b.isMaster:");
+ printjson(b.isMaster());
+
+ second.setSlaveOk();
+
+ print("b.isMaster:");
+ printjson(b.isMaster());
+
+ wait(function () { var c = b.getSisterDB("two").coll.count(); print(c); return c == 20000; });
+
+ print("two_initsync.js SUCCESS");
+
+ replTest.stopSet(signal);
+}
+
+
+print("two_initsync.js");
+doTest( 15 );
diff --git a/jstests/sharding/features3.js b/jstests/sharding/features3.js
index 4ab75ee..a2a8197 100644
--- a/jstests/sharding/features3.js
+++ b/jstests/sharding/features3.js
@@ -64,7 +64,7 @@ killTime = (new Date()).getTime() - killTime.getTime()
print( "killTime: " + killTime );
assert.eq( 2 , state , "failed killing" );
-assert.gt( 3000 , killTime , "took too long to kill" )
+assert.gt( 10000 , killTime , "took too long to kill" )
join()
diff --git a/jstests/sharding/shard3.js b/jstests/sharding/shard3.js
index 9f0cef4..86faedc 100644
--- a/jstests/sharding/shard3.js
+++ b/jstests/sharding/shard3.js
@@ -139,25 +139,25 @@ assert.eq( 0 , doCounts( "after dropDatabase called" ) )
s.adminCommand( { enablesharding : "test2" } );
s.adminCommand( { shardcollection : "test2.foo" , key : { num : 1 } } );
-a = s.getDB( "test2" ).foo;
-b = s2.getDB( "test2" ).foo;
-a.save( { num : 1 } );
-a.save( { num : 2 } );
-a.save( { num : 3 } );
-
+dba = s.getDB( "test2" );
+dbb = s2.getDB( "test2" );
+dba.foo.save( { num : 1 } );
+dba.foo.save( { num : 2 } );
+dba.foo.save( { num : 3 } );
+dba.getLastError();
assert.eq( 1 , s.onNumShards( "foo" , "test2" ) , "B on 1 shards" );
-assert.eq( 3 , a.count() , "Ba" );
-assert.eq( 3 , b.count() , "Bb" );
+assert.eq( 3 , dba.foo.count() , "Ba" );
+assert.eq( 3 , dbb.foo.count() , "Bb" );
s.adminCommand( { split : "test2.foo" , middle : { num : 2 } } );
s.adminCommand( { movechunk : "test2.foo" , find : { num : 3 } , to : s.getOther( s.getServer( "test2" ) ).name } );
assert.eq( 2 , s.onNumShards( "foo" , "test2" ) , "B on 2 shards" );
-x = a.stats()
+x = dba.foo.stats()
printjson( x )
-y = b.stats()
+y = dbb.foo.stats()
printjson( y )