summary refs log tree commit diff
path: root/jstests/slowWeekly
diff options
context:
space:
mode:
author    Antonin Kral <a.kral@bobek.cz>  2010-08-11 12:38:57 +0200
committer Antonin Kral <a.kral@bobek.cz>  2010-08-11 12:38:57 +0200
commit    7645618fd3914cb8a20561625913c20d49504a49 (patch)
tree      8370f846f58f6d71165b7a0e2eda04648584ec76 /jstests/slowWeekly
parent    68c73c3c7608b4c87f07440dc3232801720b1168 (diff)
download  mongodb-7645618fd3914cb8a20561625913c20d49504a49.tar.gz
Imported Upstream version 1.6.0
Diffstat (limited to 'jstests/slowWeekly')
-rw-r--r--  jstests/slowWeekly/conc_update.js    51
-rw-r--r--  jstests/slowWeekly/indexbg1.js      117
-rw-r--r--  jstests/slowWeekly/indexbg2.js       83
-rw-r--r--  jstests/slowWeekly/ns1.js            49
-rw-r--r--  jstests/slowWeekly/query_yield1.js   73
-rw-r--r--  jstests/slowWeekly/query_yield2.js   73
-rw-r--r--  jstests/slowWeekly/update_yield1.js  78
7 files changed, 524 insertions, 0 deletions
diff --git a/jstests/slowWeekly/conc_update.js b/jstests/slowWeekly/conc_update.js
new file mode 100644
index 0000000..6094136
--- /dev/null
+++ b/jstests/slowWeekly/conc_update.js
@@ -0,0 +1,51 @@
+// Concurrency test: a parallel shell runs a full-collection multi-update
+// while this shell repeatedly count()s the same indexed range; verifies
+// that neither operation reports an error while they interleave.
+db = db.getSisterDB("concurrency")
+db.dropDatabase();
+
+NRECORDS=5*1024*1024 // this needs to be relatively big so that
+ // the update() will take a while, but it could
+ // probably be smaller.
+
+print("loading "+NRECORDS+" documents (progress msg every 1024*1024 documents)")
+for (i=0; i<(NRECORDS); i++) {
+ db.conc.insert({x:i})
+ if ((i%(1024*1024))==0)
+ print("loaded " + i/(1024*1024) + " mibi-records")
+}
+
+print("making an index (this will take a while)")
+db.conc.ensureIndex({x:1})
+
+var c1=db.conc.count({x:{$lt:NRECORDS}})
+// this is just a flag that the child will toggle when it's done.
+db.concflag.update({}, {inprog:true}, true)
+
+// Child shell: shift every x up by NRECORDS (moving documents out of the
+// {$lt: NRECORDS} range), then clear the inprog flag so the parent stops.
+updater=startParallelShell("db=db.getSisterDB('concurrency');\
+ db.conc.update({}, {$inc:{x: "+NRECORDS+"}}, false, true);\
+ e=db.getLastError();\
+ print('update error: '+ e);\
+ db.concflag.update({},{inprog:false});\
+ assert.eq(e, null, \"update failed\");");
+
+querycount=0;
+decrements=0;
+misses=0
+// Poll count() while the update runs.  Since the child moves documents out
+// of the counted range, successive counts should mostly shrink
+// ("decrements"); "misses" counts polls where no decrease was observed.
+while (1) {
+ if (db.concflag.findOne().inprog) {
+ c2=db.conc.count({x:{$lt:NRECORDS}})
+ e=db.getLastError()
+ print(c2)
+ print(e)
+ assert.eq(e, null, "some count() failed")
+ querycount++;
+ if (c2<c1)
+ decrements++;
+ else
+ misses++;
+ c1 = c2;
+ } else
+ break;
+ sleep(10);
+}
+print(querycount + " queries, " + decrements + " decrements, " + misses + " misses");
+
+// Join the parallel shell: blocks until the child process exits.
+updater() // wait()
diff --git a/jstests/slowWeekly/indexbg1.js b/jstests/slowWeekly/indexbg1.js
new file mode 100644
index 0000000..5e34d44
--- /dev/null
+++ b/jstests/slowWeekly/indexbg1.js
@@ -0,0 +1,117 @@
+// Test background index creation
+//
+// While a background {i:1} index build runs in a parallel shell, perform
+// reads, getmores, and writes against the collection, plus a foreground
+// ensureIndex/dropIndex that are expected to error while the build holds
+// the index.  If the build finishes before the checks can run, the data
+// set is doubled and the whole sequence retried.
+
+// "<base>_parallelStatus" collection signals completion of the child shell.
+parallel = function() {
+ return db[ baseName + "_parallelStatus" ];
+}
+
+resetParallel = function() {
+ parallel().drop();
+}
+
+// Spawn a second mongo shell that evals `work`, then records {done:1}.
+doParallel = function(work) {
+ resetParallel();
+ print("doParallel: " + work);
+ startMongoProgramNoConnect("mongo", "--eval", work + "; db." + baseName + "_parallelStatus.save( {done:1} );", db.getMongo().host);
+}
+
+doneParallel = function() {
+ return !!parallel().findOne();
+}
+
+waitParallel = function() {
+ assert.soon( function() { return doneParallel(); }, "parallel did not finish in time", 300000, 1000 );
+}
+
+// waiting on SERVER-620
+
+print( "index11.js host:" );
+print( db.getMongo().host );
+
+if (1) {
+
+size = 500000;
+while( 1 ) { // if indexing finishes before we can run checks, try indexing w/ more data
+ print( "size: " + size );
+ baseName = "jstests_index11";
+ fullName = "db." + baseName;
+ t = db[ baseName ];
+ t.drop();
+
+ db.eval( function( size ) {
+ for( i = 0; i < size; ++i ) {
+ db.jstests_index11.save( {i:i} );
+ }
+ },
+ size );
+ assert.eq( size, t.count() );
+
+ doParallel( fullName + ".ensureIndex( {i:1}, {background:true} )" );
+ try {
+ // wait for indexing to start
+ print("wait for indexing to start");
+ assert.soon( function() { return 2 == db.system.indexes.count( {ns:"test."+baseName} ) }, "no index created", 30000, 50 );
+ print("started.");
+ assert.eq( size, t.count() );
+ assert.eq( 100, t.findOne( {i:100} ).i );
+ q = t.find();
+ for( i = 0; i < 120; ++i ) { // getmore
+ q.next();
+ assert( q.hasNext(), "no next" );
+ }
+ // while the build is still in progress, queries must not use the new index
+ assert.eq( "BasicCursor", t.find( {i:100} ).explain().cursor, "used btree cursor" );
+ t.remove( {i:40} );
+ t.update( {i:10}, {i:-10} );
+ id = t.find().hint( {$natural:-1} )._id;
+ t.update( {_id:id}, {i:-2} );
+ t.save( {i:-50} );
+ t.save( {i:size+2} );
+ assert( !db.getLastError() );
+
+ // NOTE(review): the foreground ensureIndex/dropIndex below are expected
+ // to FAIL while the background build is running — the asserts require a
+ // non-null getLastError after each.
+ print("calling ensureIndex");
+ t.ensureIndex( {i:1} );
+
+ printjson( db.getLastError() );
+ assert( db.getLastError() );
+ assert.eq( size + 1, t.count() );
+ assert( !db.getLastError() );
+
+ print("calling dropIndex");
+ t.dropIndex( {i:1} );
+ printjson( db.getLastError() );
+ assert( db.getLastError() );
+ } catch( e ) {
+ // only a failure if we're still indexing
+ // wait for parallel status to update to reflect indexing status
+ print("caught exception");
+ sleep( 1000 );
+ if ( !doneParallel() ) {
+ throw e;
+ }
+ print("but that's OK")
+ }
+ if ( !doneParallel() ) {
+ break;
+ }
+ print( "indexing finished too soon, retrying..." );
+ size *= 2;
+ assert( size < 20000000, "unable to run checks in parallel with index creation" );
+}
+
+print("our tests done, waiting for parallel to finish");
+waitParallel();
+print("finished");
+
+// After the build completes, the index must be used and reflect the writes
+// made while it was building.
+assert.eq( "BtreeCursor i_1", t.find( {i:100} ).explain().cursor );
+assert.eq( 1, t.count( {i:-10} ) );
+assert.eq( 1, t.count( {i:-2} ) );
+assert.eq( 1, t.count( {i:-50} ) );
+assert.eq( 1, t.count( {i:size+2} ) );
+assert.eq( 0, t.count( {i:40} ) );
+assert( !db.getLastError() );
+print("about to drop index");
+t.dropIndex( {i:1} );
+printjson( db.getLastError() );
+assert( !db.getLastError() );
+
+} // if 1
+
diff --git a/jstests/slowWeekly/indexbg2.js b/jstests/slowWeekly/indexbg2.js
new file mode 100644
index 0000000..1830f42
--- /dev/null
+++ b/jstests/slowWeekly/indexbg2.js
@@ -0,0 +1,83 @@
+// Test background index creation w/ constraints
+//
+// Same parent/child race as indexbg1.js, but the background index is
+// {unique:true}: while the build runs, duplicate-key documents are saved.
+// The test is executed twice — dropDups:"false" and dropDups:"true" — and
+// checks the resulting collection/index state for each.
+
+// "<base>_parallelStatus" collection signals completion of the child shell.
+parallel = function() {
+ return db[ baseName + "_parallelStatus" ];
+}
+
+resetParallel = function() {
+ parallel().drop();
+}
+
+// Spawn a second mongo shell that evals `work`, then records {done:1}.
+doParallel = function( work ) {
+ resetParallel();
+ startMongoProgramNoConnect( "mongo", "--eval", work + "; db." + baseName + "_parallelStatus.save( {done:1} );", db.getMongo().host );
+}
+
+doneParallel = function() {
+ return !!parallel().findOne();
+}
+
+waitParallel = function() {
+ assert.soon( function() { return doneParallel(); }, "parallel did not finish in time", 300000, 1000 );
+}
+
+// dropDups is passed as the string "true"/"false" and spliced into the
+// ensureIndex options evaluated by the child shell.
+doTest = function(dropDups) {
+
+ size = 10000;
+ while (1) { // if indexing finishes before we can run checks, try indexing w/ more data
+ print("size: " + size);
+ baseName = "jstests_index12";
+ fullName = "db." + baseName;
+ t = db[baseName];
+ t.drop();
+
+ db.eval(function(size) {
+ for (i = 0; i < size; ++i) {
+ db.jstests_index12.save({ i: i });
+ }
+ },
+ size);
+ assert.eq(size, t.count());
+
+ doParallel(fullName + ".ensureIndex( {i:1}, {background:true, unique:true, dropDups:" + dropDups + "} )");
+ try {
+ // wait for indexing to start
+ assert.soon(function() { return 2 == db.system.indexes.count({ ns: "test." + baseName }) }, "no index created", 30000, 50);
+ // insert duplicate keys (i:0 and i:size-1 already exist) mid-build
+ t.save({ i: 0, n: true });
+ //printjson(db.getLastError());
+ t.save({ i: size - 1, n: true });
+ //printjson(db.getLastError());
+ } catch (e) {
+ // only a failure if we're still indexing
+ // wait for parallel status to update to reflect indexing status
+ sleep(1000);
+ if (!doneParallel()) {
+ throw e;
+ }
+ }
+ if (!doneParallel()) {
+ break;
+ }
+ print("indexing finished too soon, retrying...");
+ size *= 2;
+ assert(size < 5000000, "unable to run checks in parallel with index creation");
+ }
+
+ waitParallel();
+
+ if( dropDups == "true" ) {
+ // with dropDups the duplicates inserted above must have been discarded
+ assert.eq(size, t.find().toArray().length, "full query failed");
+ assert.eq(size, t.count(), "count failed");
+ }
+ else {
+ /* without dropdups, it could be that there is more than size now but the index failed
+ to build - which is valid. we check index isn't there.
+ */
+ if (t.count() != size)
+ assert.eq(1, t.getIndexes().length, "change in # of elems yet index is there");
+ }
+
+}
+
+doTest( "false" );
+doTest( "true" );
diff --git a/jstests/slowWeekly/ns1.js b/jstests/slowWeekly/ns1.js
new file mode 100644
index 0000000..f51db01
--- /dev/null
+++ b/jstests/slowWeekly/ns1.js
@@ -0,0 +1,49 @@
+// Namespace churn test: create 1000 one-document collections, then
+// repeatedly drop and recreate random ones, verifying after each drop that
+// all other collections are untouched and the dropped one is empty.
+
+mydb = db.getSisterDB( "test_ns1" );
+mydb.dropDatabase();
+
+// Verify collection x<n> holds exactly the single doc {_id:n}; when isNew,
+// first assert it is empty and insert that doc.  Returns the collection.
+check = function( n , isNew ){
+ var coll = mydb["x" + n];
+ if ( isNew ){
+ assert.eq( 0 , coll.count() , "pop a: " + n );
+ coll.insert( { _id : n } );
+ }
+ assert.eq( 1 , coll.count() , "pop b: " + n );
+ assert.eq( n , coll.findOne()._id , "pop c: " + n );
+ return coll;
+}
+
+max = 0;
+
+for ( ; max<1000; max++ ){
+ check(max,true);
+}
+
+// Verify every collection, expecting the one at index `removed` (if given)
+// to be empty and all others intact.
+function checkall( removed ){
+ for ( var i=0; i<max; i++ ){
+ if ( removed == i ){
+ assert.eq( 0 , mydb["x"+i].count() , "should be 0 : " + removed );
+ }
+ else {
+ check( i , false );
+ }
+ }
+}
+
+checkall();
+
+// Fixed seed keeps the drop/recreate sequence reproducible across runs.
+Random.srand( 123124 );
+its = max / 2;
+print( "its: " + its );
+for ( i=0; i<its; i++ ){
+ x = Random.randInt( max );
+ check( x , false ).drop();
+ checkall( x );
+ check( x , true );
+ if ( ( i + 1 ) % 20 == 0 ){
+ print( i + "/" + its );
+ }
+}
+print( "yay" )
+
+mydb.dropDatabase();
diff --git a/jstests/slowWeekly/query_yield1.js b/jstests/slowWeekly/query_yield1.js
new file mode 100644
index 0000000..e996b53
--- /dev/null
+++ b/jstests/slowWeekly/query_yield1.js
@@ -0,0 +1,73 @@
+// Verifies that a long-running $where query yields its lock: while the
+// query runs in a parallel shell, inserts from this shell must complete
+// quickly (each under 50ms).
+
+t = db.query_yield1;
+t.drop()
+
+N = 10000;
+i = 0;
+
+// Per-document JS predicate: burns ~500 multiplications, always false.
+q = function(){ var x=this.n; for ( var i=0; i<500; i++ ){ x = x * 2; } return false; }
+
+// Grow the collection until three timed probe runs of the query take >2s,
+// so the parallel query below runs long enough to race against.
+while ( true ){
+ function fill(){
+ for ( ; i<N; i++ ){
+ t.insert( { _id : i , n : 1 } )
+ }
+ }
+
+ function timeQuery(){
+ return Date.timeFunc(
+ function(){
+ assert.eq( 0 , t.find( q ).itcount() );
+ }
+ );
+
+ }
+
+ fill();
+ timeQuery();
+ timeQuery();
+ time = timeQuery();
+ print( N + "\t" + time );
+ if ( time > 2000 )
+ break;
+
+ N *= 2;
+}
+
+// --- test 1
+
+assert.eq( 0, db.currentOp().inprog.length , "setup broken" );
+
+// Run the same slow predicate in a second shell.
+join = startParallelShell( "print( 0 == db.query_yield1.find( function(){ var x=this.n; for ( var i=0; i<500; i++ ){ x = x * 2; } return false; } ).itcount() ); " )
+
+assert.soon(
+ function(){
+ var x = db.currentOp().inprog;
+ return x.length > 0;
+ } , "never doing query" , 2000 , 1
+);
+
+print( "start query" );
+
+// While the parallel query shows up in currentOp, time inserts from this
+// shell; each must finish in under 50ms, proving the query yields.
+num = 0;
+start = new Date();
+while ( ( (new Date()).getTime() - start ) < ( time * 2 ) ){
+ var me = Date.timeFunc( function(){ t.insert( { x : 1 } ); db.getLastError(); } )
+ var x = db.currentOp()
+
+ if ( num++ == 0 ){
+ assert.eq( 1 , x.inprog.length , "nothing in prog" );
+ }
+
+ assert.gt( 50 , me );
+
+ if ( x.inprog.length == 0 )
+ break;
+
+}
+
+join();
+
+var x = db.currentOp()
+assert.eq( 0 , x.inprog.length , "weird 2" );
+
diff --git a/jstests/slowWeekly/query_yield2.js b/jstests/slowWeekly/query_yield2.js
new file mode 100644
index 0000000..e13fabe
--- /dev/null
+++ b/jstests/slowWeekly/query_yield2.js
@@ -0,0 +1,73 @@
+// Same yield test as query_yield1.js, but with a much heavier per-document
+// JS predicate (50000 iterations) over a smaller starting collection, and a
+// looser insert-latency bound (75ms).
+
+t = db.query_yield2;
+t.drop()
+
+N = 100;
+i = 0;
+
+// Per-document JS predicate: burns 50000 multiplications, always false.
+q = function(){ var x=this.n; for ( var i=0; i<50000; i++ ){ x = x * 2; } return false; }
+
+// Grow the collection until three timed probe runs of the query take >2s.
+while ( true ){
+ function fill(){
+ for ( ; i<N; i++ ){
+ t.insert( { _id : i , n : 1 } )
+ }
+ }
+
+ function timeQuery(){
+ return Date.timeFunc(
+ function(){
+ assert.eq( 0 , t.find( q ).itcount() );
+ }
+ );
+
+ }
+
+ fill();
+ timeQuery();
+ timeQuery();
+ time = timeQuery();
+ print( N + "\t" + time );
+ if ( time > 2000 )
+ break;
+
+ N *= 2;
+}
+
+// --- test 1
+
+assert.eq( 0, db.currentOp().inprog.length , "setup broken" );
+
+// Run the same slow predicate in a second shell.
+join = startParallelShell( "print( 0 == db.query_yield2.find( function(){ var x=this.n; for ( var i=0; i<50000; i++ ){ x = x * 2; } return false; } ).itcount() ); " )
+
+assert.soon(
+ function(){
+ var x = db.currentOp().inprog;
+ return x.length > 0;
+ } , "never doing query" , 2000 , 1
+);
+
+print( "start query" );
+
+// While the parallel query is in currentOp, each insert from this shell
+// must complete in under 75ms, proving the query yields.
+num = 0;
+start = new Date();
+while ( ( (new Date()).getTime() - start ) < ( time * 2 ) ){
+ var me = Date.timeFunc( function(){ t.insert( { x : 1 } ); db.getLastError(); } )
+ var x = db.currentOp()
+
+ if ( num++ == 0 ){
+ assert.eq( 1 , x.inprog.length , "nothing in prog" );
+ }
+
+ assert.gt( 75 , me );
+
+ if ( x.inprog.length == 0 )
+ break;
+
+}
+
+join();
+
+var x = db.currentOp()
+assert.eq( 0 , x.inprog.length , "weird 2" );
+
diff --git a/jstests/slowWeekly/update_yield1.js b/jstests/slowWeekly/update_yield1.js
new file mode 100644
index 0000000..2e63690
--- /dev/null
+++ b/jstests/slowWeekly/update_yield1.js
@@ -0,0 +1,78 @@
+// Verifies that a multi-document update yields to readers: while the update
+// runs in a parallel shell, findOne() from this shell must stay fast.
+// Test 2 repeats the update with {$atomic:true}, which must NOT yield — a
+// findOne() issued during it only returns after the update has finished.
+
+t = db.update_yield1;
+t.drop()
+
+N = 10000;
+i = 0;
+
+// Grow the collection until three timed probe runs of the multi-update
+// take >2s, so the parallel update below runs long enough to race against.
+while ( true ){
+ function fill(){
+ for ( ; i<N; i++ ){
+ t.insert( { _id : i , n : 1 } )
+ }
+ }
+
+ function timeUpdate(){
+ return Date.timeFunc(
+ function(){
+ t.update( {} , { $inc : { n : 1 } } , false , true );
+ var r = db.getLastErrorObj();
+ }
+ );
+
+ }
+
+ fill();
+ timeUpdate();
+ timeUpdate();
+ time = timeUpdate();
+ print( N + "\t" + time );
+ if ( time > 2000 )
+ break;
+
+ N *= 2;
+}
+
+// --- test 1
+
+join = startParallelShell( "db.update_yield1.update( {} , { $inc : { n : 1 } } , false , true ); db.getLastError()" );
+
+assert.soon(
+ function(){
+ return db.currentOp().inprog.length > 0;
+ } , "never doing update"
+);
+
+// While the parallel update is in currentOp, each findOne() from this shell
+// must complete in under 50ms, proving the update yields.
+num = 0;
+start = new Date();
+while ( ( (new Date()).getTime() - start ) < ( time * 2 ) ){
+ var me = Date.timeFunc( function(){ t.findOne(); } );
+
+ if ( num++ == 0 ){
+ var x = db.currentOp()
+ assert.eq( 1 , x.inprog.length , "nothing in prog" );
+ }
+
+ assert.gt( 50 , me );
+}
+
+join();
+
+var x = db.currentOp()
+assert.eq( 0 , x.inprog.length , "weird 2" );
+
+// --- test 2
+
+// With $atomic the update must hold its lock to completion: by the time the
+// findOne() below returns, the update must no longer appear in currentOp.
+join = startParallelShell( "db.update_yield1.update( { $atomic : true } , { $inc : { n : 1 } } , false , true ); db.getLastError()" );
+
+assert.soon(
+ function(){
+ return db.currentOp().inprog.length > 0;
+ } , "never doing update 2"
+);
+
+t.findOne();
+var x = db.currentOp()
+assert.eq( 0 , x.inprog.length , "should have been atomic" );
+
+join();