author     Antonin Kral <a.kral@bobek.cz>  2010-08-11 12:38:57 +0200
committer  Antonin Kral <a.kral@bobek.cz>  2010-08-11 12:38:57 +0200
commit     7645618fd3914cb8a20561625913c20d49504a49 (patch)
tree       8370f846f58f6d71165b7a0e2eda04648584ec76 /jstests
parent     68c73c3c7608b4c87f07440dc3232801720b1168 (diff)
download   mongodb-7645618fd3914cb8a20561625913c20d49504a49.tar.gz

    Imported Upstream version 1.6.0
Diffstat (limited to 'jstests')
-rw-r--r--  jstests/_fail.js | 4
-rw-r--r--  jstests/_runner.js | 8
-rw-r--r--  jstests/apitest_db.js | 14
-rw-r--r--  jstests/apply_ops1.js | 51
-rw-r--r--  jstests/arrayfind2.js | 8
-rw-r--r--  jstests/capped3.js | 7
-rw-r--r--  jstests/capped6.js | 82
-rw-r--r--  jstests/capped7.js | 72
-rw-r--r--  jstests/clone/clonecollection.js | 150
-rw-r--r--  jstests/conc_update.js | 45
-rw-r--r--  jstests/copydb-auth.js (renamed from jstests/copydb2.js) | 0
-rw-r--r--  jstests/cursor8.js | 12
-rw-r--r--  jstests/cursora.js | 34
-rw-r--r--  jstests/datasize.js | 8
-rw-r--r--  jstests/datasize2.js | 27
-rw-r--r--  jstests/dbadmin.js | 5
-rw-r--r--  jstests/dbcase.js | 23
-rw-r--r--  jstests/dbhash.js | 9
-rw-r--r--  jstests/disk/directoryperdb.js | 8
-rw-r--r--  jstests/disk/repair.js | 37
-rw-r--r--  jstests/disk/repair2.js | 47
-rw-r--r--  jstests/disk/repair3.js | 52
-rw-r--r--  jstests/disk/repair4.js | 44
-rw-r--r--  jstests/distinct_array1.js | 24
-rw-r--r--  jstests/distinct_speed1.js | 26
-rw-r--r--  jstests/drop.js | 2
-rw-r--r--  jstests/evalb.js | 3
-rw-r--r--  jstests/explain2.js | 2
-rw-r--r--  jstests/find_and_modify.js | 4
-rw-r--r--  jstests/find_and_modify2.js | 10
-rw-r--r--  jstests/find_and_modify3.js | 21
-rw-r--r--  jstests/find_and_modify4.js | 55
-rw-r--r--  jstests/fm4.js | 16
-rw-r--r--  jstests/geo2.js | 1
-rw-r--r--  jstests/geo3.js | 2
-rw-r--r--  jstests/geo_box3.js | 36
-rw-r--r--  jstests/geo_circle2.js | 23
-rw-r--r--  jstests/geo_circle3.js | 28
-rw-r--r--  jstests/geo_circle4.js | 24
-rw-r--r--  jstests/geo_circle5.js | 28
-rw-r--r--  jstests/geo_haystack1.js | 59
-rw-r--r--  jstests/geo_haystack2.js | 60
-rw-r--r--  jstests/geod.js | 14
-rw-r--r--  jstests/geoe.js | 32
-rw-r--r--  jstests/group6.js | 31
-rw-r--r--  jstests/hint1.js | 4
-rw-r--r--  jstests/in3.js | 4
-rw-r--r--  jstests/in4.js | 53
-rw-r--r--  jstests/in5.js | 56
-rw-r--r--  jstests/in6.js | 13
-rw-r--r--  jstests/in7.js | 6
-rw-r--r--  jstests/index1.js | 6
-rw-r--r--  jstests/index10.js | 4
-rw-r--r--  jstests/index6.js | 2
-rw-r--r--  jstests/index7.js | 18
-rw-r--r--  jstests/index_check2.js | 2
-rw-r--r--  jstests/index_check6.js | 52
-rw-r--r--  jstests/index_check8.js | 15
-rw-r--r--  jstests/index_elemmatch1.js | 28
-rw-r--r--  jstests/index_many.js | 75
-rw-r--r--  jstests/index_many2.js | 29
-rw-r--r--  jstests/indexapi.js | 2
-rw-r--r--  jstests/indexe.js | 2
-rw-r--r--  jstests/indexh.js | 34
-rw-r--r--  jstests/maxscan.js | 14
-rw-r--r--  jstests/not2.js | 15
-rw-r--r--  jstests/numberlong.js | 55
-rw-r--r--  jstests/objid5.js | 12
-rw-r--r--  jstests/objid6.js | 14
-rw-r--r--  jstests/objid7.js | 13
-rw-r--r--  jstests/or1.js | 57
-rw-r--r--  jstests/or2.js | 68
-rw-r--r--  jstests/or3.js | 64
-rw-r--r--  jstests/or4.js | 98
-rw-r--r--  jstests/or5.js | 107
-rw-r--r--  jstests/or6.js | 31
-rw-r--r--  jstests/or7.js | 41
-rw-r--r--  jstests/or8.js | 16
-rw-r--r--  jstests/or9.js | 54
-rw-r--r--  jstests/ora.js | 17
-rw-r--r--  jstests/orb.js | 17
-rw-r--r--  jstests/pullall2.js | 20
-rw-r--r--  jstests/ref3.js | 22
-rw-r--r--  jstests/regex5.js | 22
-rw-r--r--  jstests/repair.js | 2
-rw-r--r--  jstests/repl/basic1.js | 53
-rw-r--r--  jstests/repl/block1.js | 24
-rw-r--r--  jstests/repl/block2.js | 45
-rw-r--r--  jstests/repl/mastermaster1.js | 34
-rw-r--r--  jstests/repl/repair.js | 14
-rw-r--r--  jstests/repl/replacePeer2.js | 14
-rw-r--r--  jstests/repl/snapshot2.js | 13
-rw-r--r--  jstests/repl/snapshot3.js | 17
-rw-r--r--  jstests/replsets/replset1.js | 115
-rw-r--r--  jstests/replsets/replset2.js | 111
-rw-r--r--  jstests/replsets/replset3.js | 56
-rw-r--r--  jstests/replsets/replset4.js | 29
-rw-r--r--  jstests/replsets/replset_remove_node.js | 57
-rw-r--r--  jstests/replsets/replsetadd.js | 31
-rw-r--r--  jstests/replsets/replsetarb1.js | 33
-rw-r--r--  jstests/replsets/replsetarb2.js | 45
-rw-r--r--  jstests/replsets/replsetprio1.js | 53
-rw-r--r--  jstests/replsets/replsetrestart1.js | 57
-rw-r--r--  jstests/replsets/replsetrestart2.js | 46
-rw-r--r--  jstests/replsets/rollback.js | 129
-rw-r--r--  jstests/replsets/rollback2.js | 199
-rw-r--r--  jstests/replsets/sync1.js | 192
-rw-r--r--  jstests/replsets/twosets.js | 36
-rw-r--r--  jstests/rs/rs_basic.js | 177
-rw-r--r--  jstests/rs/test_framework.js | 30
-rw-r--r--  jstests/sharding/addshard1.js | 56
-rw-r--r--  jstests/sharding/auto1.js | 34
-rw-r--r--  jstests/sharding/auto2.js | 105
-rw-r--r--  jstests/sharding/bigMapReduce.js | 17
-rw-r--r--  jstests/sharding/count1.js | 64
-rw-r--r--  jstests/sharding/count2.js | 43
-rw-r--r--  jstests/sharding/cursor1.js | 60
-rw-r--r--  jstests/sharding/diffservers1.js | 2
-rw-r--r--  jstests/sharding/error1.js | 24
-rw-r--r--  jstests/sharding/features1.js | 27
-rw-r--r--  jstests/sharding/features2.js | 53
-rw-r--r--  jstests/sharding/features3.js | 86
-rw-r--r--  jstests/sharding/findandmodify1.js | 42
-rw-r--r--  jstests/sharding/key_many.js | 41
-rw-r--r--  jstests/sharding/movePrimary1.js | 19
-rw-r--r--  jstests/sharding/moveshard1.js | 39
-rw-r--r--  jstests/sharding/presplit.js | 37
-rw-r--r--  jstests/sharding/remove1.js | 16
-rw-r--r--  jstests/sharding/rename.js | 26
-rw-r--r--  jstests/sharding/shard1.js | 14
-rw-r--r--  jstests/sharding/shard2.js | 45
-rw-r--r--  jstests/sharding/shard3.js | 48
-rw-r--r--  jstests/sharding/shard6.js | 75
-rw-r--r--  jstests/sharding/sort1.js | 81
-rw-r--r--  jstests/sharding/splitpick.js | 14
-rw-r--r--  jstests/sharding/stats.js | 60
-rw-r--r--  jstests/sharding/sync1.js | 5
-rw-r--r--  jstests/sharding/sync2.js | 70
-rw-r--r--  jstests/sharding/sync3.js | 10
-rw-r--r--  jstests/sharding/sync4.js | 19
-rw-r--r--  jstests/sharding/update1.js | 19
-rw-r--r--  jstests/shellkillop.js | 2
-rw-r--r--  jstests/shellspawn.js | 6
-rw-r--r--  jstests/slice1.js | 68
-rw-r--r--  jstests/slowNightly/remove9.js | 12
-rw-r--r--  jstests/slowNightly/run_sharding_passthrough.js | 94
-rw-r--r--  jstests/slowNightly/sharding_balance1.js | 55
-rw-r--r--  jstests/slowNightly/sharding_balance2.js | 54
-rw-r--r--  jstests/slowNightly/sharding_balance3.js | 57
-rw-r--r--  jstests/slowNightly/sharding_balance4.js | 122
-rw-r--r--  jstests/slowNightly/sharding_cursors1.js | 71
-rw-r--r--  jstests/slowNightly/sharding_rs1.js | 61
-rw-r--r--  jstests/slowWeekly/conc_update.js | 51
-rw-r--r--  jstests/slowWeekly/indexbg1.js (renamed from jstests/slow/indexbg1.js) | 0
-rw-r--r--  jstests/slowWeekly/indexbg2.js (renamed from jstests/slow/indexbg2.js) | 0
-rw-r--r--  jstests/slowWeekly/ns1.js (renamed from jstests/slow/ns1.js) | 0
-rw-r--r--  jstests/slowWeekly/query_yield1.js | 73
-rw-r--r--  jstests/slowWeekly/query_yield2.js | 73
-rw-r--r--  jstests/slowWeekly/update_yield1.js | 78
-rw-r--r--  jstests/splitvector.js | 81
-rw-r--r--  jstests/tempCleanup.js | 16
-rw-r--r--  jstests/tool/csv1.js | 8
-rw-r--r--  jstests/tool/exportimport1.js | 13
-rw-r--r--  jstests/tool/files1.js | 27
-rw-r--r--  jstests/update_addToSet2.js | 11
-rw-r--r--  jstests/update_arraymatch4.js | 18
-rw-r--r--  jstests/update_arraymatch5.js | 15
-rw-r--r--  jstests/update_multi4.js | 18
-rw-r--r--  jstests/update_multi5.js | 17
-rw-r--r--  jstests/upsert1.js | 14
-rw-r--r--  jstests/where3.js | 10
171 files changed, 5946 insertions(+), 452 deletions(-)
diff --git a/jstests/_fail.js b/jstests/_fail.js
new file mode 100644
index 0000000..9d41d25
--- /dev/null
+++ b/jstests/_fail.js
@@ -0,0 +1,4 @@
+// For testing the test runner.
+assert.eq(1, 2, "fail1")
+
+print("you should not see this") \ No newline at end of file
diff --git a/jstests/_runner.js b/jstests/_runner.js
index f0ce49d..48619c1 100644
--- a/jstests/_runner.js
+++ b/jstests/_runner.js
@@ -3,11 +3,12 @@
//
var files = listFiles("jstests");
+var runnerStart = new Date()
+
files.forEach(
function(x) {
- if ( /_runner/.test(x.name) ||
- /_lodeRunner/.test(x.name) ||
+ if ( /[\/\\]_/.test(x.name) ||
! /\.js$/.test(x.name ) ){
print(" >>>>>>>>>>>>>>> skipping " + x.name);
return;
@@ -22,3 +23,6 @@ files.forEach(
);
+var runnerEnd = new Date()
+
+print( "total runner time: " + ( ( runnerEnd.getTime() - runnerStart.getTime() ) / 1000 ) + "secs" )
diff --git a/jstests/apitest_db.js b/jstests/apitest_db.js
index 45e25b6..f54879c 100644
--- a/jstests/apitest_db.js
+++ b/jstests/apitest_db.js
@@ -2,6 +2,8 @@
* Tests for the db object enhancement
*/
+assert( "test" == db, "wrong database currently not test" );
+
dd = function( x ){
//print( x );
}
@@ -34,7 +36,7 @@ dd( "d" );
db.createCollection("test");
var found = false;
db.getCollection( "system.namespaces" ).find().forEach( function(x) { if (x.name == "test.test") found = true; });
-assert(found);
+assert(found, "found test.test in system.namespaces");
dd( "e" );
@@ -43,16 +45,16 @@ dd( "e" );
*/
db.setProfilingLevel(0);
-assert(db.getProfilingLevel() == 0);
+assert(db.getProfilingLevel() == 0, "prof level 0");
db.setProfilingLevel(1);
-assert(db.getProfilingLevel() == 1);
+assert(db.getProfilingLevel() == 1, "p1");
db.setProfilingLevel(2);
-assert(db.getProfilingLevel() == 2);
+assert(db.getProfilingLevel() == 2, "p2");
db.setProfilingLevel(0);
-assert(db.getProfilingLevel() == 0);
+assert(db.getProfilingLevel() == 0, "prof level 0");
dd( "f" );
asserted = false;
@@ -64,7 +66,7 @@ catch (e) {
asserted = true;
assert(e.dbSetProfilingException);
}
-assert( asserted );
+assert( asserted, "should have asserted" );
dd( "g" );
diff --git a/jstests/apply_ops1.js b/jstests/apply_ops1.js
new file mode 100644
index 0000000..adfcc27
--- /dev/null
+++ b/jstests/apply_ops1.js
@@ -0,0 +1,51 @@
+
+t = db.apply_ops1;
+t.drop();
+
+assert.eq( 0 , t.find().count() , "A0" );
+db.runCommand( { applyOps : [ { "op" : "i" , "ns" : t.getFullName() , "o" : { _id : 5 , x : 17 } } ] } )
+assert.eq( 1 , t.find().count() , "A1" );
+
+o = { _id : 5 , x : 17 }
+assert.eq( o , t.findOne() , "A2" );
+
+res = db.runCommand( { applyOps : [
+ { "op" : "u" , "ns" : t.getFullName() , "o2" : { _id : 5 } , "o" : { $inc : { x : 1 } } } ,
+ { "op" : "u" , "ns" : t.getFullName() , "o2" : { _id : 5 } , "o" : { $inc : { x : 1 } } }
+] } )
+
+o.x++;
+o.x++;
+
+assert.eq( 1 , t.find().count() , "A3" );
+assert.eq( o , t.findOne() , "A4" );
+
+
+res = db.runCommand( { applyOps :
+ [
+ { "op" : "u" , "ns" : t.getFullName() , "o2" : { _id : 5 } , "o" : { $inc : { x : 1 } } } ,
+ { "op" : "u" , "ns" : t.getFullName() , "o2" : { _id : 5 } , "o" : { $inc : { x : 1 } } }
+ ]
+ ,
+ preCondition : [ { ns : t.getFullName() , q : { _id : 5 } , res : { x : 19 } } ]
+ } );
+
+o.x++;
+o.x++;
+
+assert.eq( 1 , t.find().count() , "B1" );
+assert.eq( o , t.findOne() , "B2" );
+
+
+res = db.runCommand( { applyOps :
+ [
+ { "op" : "u" , "ns" : t.getFullName() , "o2" : { _id : 5 } , "o" : { $inc : { x : 1 } } } ,
+ { "op" : "u" , "ns" : t.getFullName() , "o2" : { _id : 5 } , "o" : { $inc : { x : 1 } } }
+ ]
+ ,
+ preCondition : [ { ns : t.getFullName() , q : { _id : 5 } , res : { x : 19 } } ]
+ } );
+
+assert.eq( 1 , t.find().count() , "B3" );
+assert.eq( o , t.findOne() , "B4" );
+
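The new apply_ops1.js exercises the applyOps command, which replays operations given in oplog format ("i" insert, "u" update with an "o2" match document) and can be guarded with a preCondition query that must match before anything is applied. A minimal sketch against a hypothetical demo collection:

    var coll = db.applyops_demo;   // hypothetical collection name
    coll.drop();
    db.runCommand( { applyOps : [ { op : "i", ns : coll.getFullName(), o : { _id : 1, x : 0 } } ] } );
    var res = db.runCommand( {
        applyOps : [ { op : "u", ns : coll.getFullName(), o2 : { _id : 1 }, o : { $inc : { x : 1 } } } ],
        preCondition : [ { ns : coll.getFullName(), q : { _id : 1 }, res : { x : 0 } } ]
    } );
    // the update applies only when the precondition document matched
    assert.eq( 1, coll.findOne().x );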
diff --git a/jstests/arrayfind2.js b/jstests/arrayfind2.js
index 59bf2b0..94d77f1 100644
--- a/jstests/arrayfind2.js
+++ b/jstests/arrayfind2.js
@@ -22,14 +22,14 @@ go( "no index" );
t.ensureIndex( { a : 1 } );
go( "index(a)" );
-assert.eq( [], t.find( { a : { $all : [ { $elemMatch : { x : 3 } } ] } } ).explain().indexBounds );
+assert.eq( {}, t.find( { a : { $all : [ { $elemMatch : { x : 3 } } ] } } ).explain().indexBounds );
t.ensureIndex( { "a.x": 1 } );
-assert.eq( [ [ {"a.x":3},{"a.x":3} ] ], t.find( { a : { $all : [ { $elemMatch : { x : 3 } } ] } } ).explain().indexBounds );
+assert.eq( {"a.x":[[3,3]]}, t.find( { a : { $all : [ { $elemMatch : { x : 3 } } ] } } ).explain().indexBounds );
// only first $elemMatch used to find bounds
-assert.eq( [ [ {"a.x":3},{"a.x":3} ] ], t.find( { a : { $all : [ { $elemMatch : { x : 3 } }, { $elemMatch : { y : 5 } } ] } } ).explain().indexBounds );
+assert.eq( {"a.x":[[3,3]]}, t.find( { a : { $all : [ { $elemMatch : { x : 3 } }, { $elemMatch : { y : 5 } } ] } } ).explain().indexBounds );
t.ensureIndex( { "a.x":1,"a.y":-1 } );
-assert.eq( [ [ {"a.x":3,"a.y":1.7976931348623157e+308},{"a.x":3,"a.y":4} ] ], t.find( { a : { $all : [ { $elemMatch : { x : 3, y : { $gt: 4 } } } ] } } ).explain().indexBounds );
+assert.eq( {"a.x":[[3,3]],"a.y":[[1.7976931348623157e+308,4]]}, t.find( { a : { $all : [ { $elemMatch : { x : 3, y : { $gt: 4 } } } ] } } ).explain().indexBounds );
diff --git a/jstests/capped3.js b/jstests/capped3.js
index c4f1a3c..d6d2b23 100644
--- a/jstests/capped3.js
+++ b/jstests/capped3.js
@@ -23,9 +23,10 @@ c = t2.find().sort( {$natural:-1} );
i = 999;
while( c.hasNext() ) {
assert.eq( i--, c.next().i, "E" );
-}
-print( "i: " + i );
-print( "stats: " + tojson( t2.stats() ) );
+}
+//print( "i: " + i );
+var str = tojson( t2.stats() );
+//print( "stats: " + tojson( t2.stats() ) );
assert( i < 990, "F" );
t.drop();
diff --git a/jstests/capped6.js b/jstests/capped6.js
new file mode 100644
index 0000000..851bbd1
--- /dev/null
+++ b/jstests/capped6.js
@@ -0,0 +1,82 @@
+Random.setRandomSeed();
+
+db.capped6.drop();
+db._dbCommand( { create: "capped6", capped: true, size: 1000, $nExtents: 11, autoIndexId: false } );
+tzz = db.capped6;
+
+function debug( x ) {
+// print( x );
+}
+
+function checkOrder( i ) {
+ res = tzz.find().sort( { $natural: -1 } );
+ assert( res.hasNext(), "A" );
+ var j = i;
+ while( res.hasNext() ) {
+ try {
+ assert.eq( val[ j-- ].a, res.next().a, "B" );
+ } catch( e ) {
+ debug( "capped6 err " + j );
+ throw e;
+ }
+ }
+ res = tzz.find().sort( { $natural: 1 } );
+ assert( res.hasNext(), "C" );
+ while( res.hasNext() )
+ assert.eq( val[ ++j ].a, res.next().a, "D" );
+ assert.eq( j, i, "E" );
+}
+
+var val = new Array( 500 );
+var c = "";
+for( i = 0; i < 500; ++i, c += "-" ) {
+ val[ i ] = { a: c };
+}
+
+var oldMax = Random.randInt( 500 );
+var max = 0;
+
+function doTest() {
+ for( var i = max; i < oldMax; ++i ) {
+ tzz.save( val[ i ] );
+ }
+ max = oldMax;
+ count = tzz.count();
+
+ var min = 1;
+ if ( Random.rand() > 0.3 ) {
+ min = Random.randInt( count ) + 1;
+ }
+
+ while( count > min ) {
+ var n = Random.randInt( count - min - 1 ); // 0 <= x <= count - min - 1
+ var inc = Random.rand() > 0.5;
+ debug( count + " " + n + " " + inc );
+ assert.commandWorked( db.runCommand( { captrunc:"capped6", n:n, inc:inc } ) );
+ if ( inc ) {
+ n += 1;
+ }
+ count -= n;
+ max -= n;
+ checkOrder( max - 1 );
+ }
+}
+
+for( var i = 0; i < 10; ++i ) {
+ doTest();
+}
+
+// reverse order of values
+var val = new Array( 500 );
+
+var c = "";
+for( i = 499; i >= 0; --i, c += "-" ) {
+ val[ i ] = { a: c };
+}
+db.capped6.drop();
+db._dbCommand( { create: "capped6", capped: true, size: 1000, $nExtents: 11, autoIndexId: false } );
+tzz = db.capped6;
+
+for( var i = 0; i < 10; ++i ) {
+ doTest();
+}
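capped6.js drives the captrunc test command, which truncates the n most recently inserted documents from a capped collection (inc:true removes one more). A standalone sketch with a hypothetical collection name:

    db.captrunc_demo.drop();
    db._dbCommand( { create : "captrunc_demo", capped : true, size : 4096 } );
    for ( var i = 0; i < 10; ++i )
        db.captrunc_demo.save( { i : i } );
    assert.commandWorked( db.runCommand( { captrunc : "captrunc_demo", n : 2, inc : false } ) );
    assert.eq( 8, db.captrunc_demo.count() );   // assumes no documents were evicted by wrapping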
diff --git a/jstests/capped7.js b/jstests/capped7.js
new file mode 100644
index 0000000..ecb689e
--- /dev/null
+++ b/jstests/capped7.js
@@ -0,0 +1,72 @@
+Random.setRandomSeed();
+
+db.capped7.drop();
+db._dbCommand( { create: "capped7", capped: true, size: 1000, $nExtents: 11, autoIndexId: false } );
+tzz = db.capped7;
+
+var ten = new Array( 11 ).toString().replace( /,/g, "-" );
+
+count = 0;
+
+function insertUntilFull() {
+count = tzz.count();
+ var j = 0;
+while( 1 ) {
+ tzz.save( {i:ten,j:j++} );
+ var newCount = tzz.count();
+ if ( count == newCount ) {
+ break;
+ }
+ count = newCount;
+}
+}
+
+insertUntilFull();
+
+oldCount = count;
+
+assert.eq.automsg( "11", "tzz.stats().numExtents" );
+var oldSize = tzz.stats().storageSize;
+
+assert.commandWorked( db._dbCommand( { emptycapped: "capped7" } ) );
+
+assert.eq.automsg( "11", "tzz.stats().numExtents" );
+assert.eq.automsg( "oldSize", "tzz.stats().storageSize" );
+
+assert.eq.automsg( "0", "tzz.find().itcount()" );
+assert.eq.automsg( "0", "tzz.count()" );
+
+insertUntilFull();
+
+assert.eq.automsg( "oldCount", "count" );
+assert.eq.automsg( "oldCount", "tzz.find().itcount()" );
+assert.eq.automsg( "oldCount", "tzz.count()" );
+
+assert.eq.automsg( "11", "tzz.stats().numExtents" );
+var oldSize = tzz.stats().storageSize;
+
+assert.commandWorked( db._dbCommand( { emptycapped: "capped7" } ) );
+
+assert.eq.automsg( "11", "tzz.stats().numExtents" );
+assert.eq.automsg( "oldSize", "tzz.stats().storageSize" );
+
+var total = Random.randInt( 2000 );
+for( var j = 1; j <= total; ++j ) {
+ tzz.save( {i:ten,j:j} );
+ if ( Random.rand() > 0.95 ) {
+ assert.automsg( "j >= tzz.count()" );
+ assert.eq.automsg( "tzz.count()", "tzz.find().itcount()" );
+ var c = tzz.find().sort( {$natural:-1} );
+ var k = j;
+ assert.automsg( "c.hasNext()" );
+ while( c.hasNext() ) {
+ assert.eq.automsg( "c.next().j", "k--" );
+ }
+ var c = tzz.find().sort( {$natural:1} );
+ assert.automsg( "c.hasNext()" );
+ while( c.hasNext() ) {
+ assert.eq.automsg( "c.next().j", "++k" );
+ }
+ assert.eq.automsg( "j", "k" );
+ }
+}
\ No newline at end of file
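capped7.js checks that the emptycapped test command removes every document while preserving the collection's preallocated extents, so numExtents and storageSize stay constant. The core check, as a sketch:

    var oldSize = db.capped7.stats().storageSize;
    assert.commandWorked( db._dbCommand( { emptycapped : "capped7" } ) );
    assert.eq( 0, db.capped7.count() );
    assert.eq( oldSize, db.capped7.stats().storageSize );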
diff --git a/jstests/clone/clonecollection.js b/jstests/clone/clonecollection.js
index b1f9c29..ea5f229 100644
--- a/jstests/clone/clonecollection.js
+++ b/jstests/clone/clonecollection.js
@@ -2,65 +2,6 @@
var baseName = "jstests_clonecollection";
-parallel = function() {
- return t.parallelStatus;
-}
-
-resetParallel = function() {
- parallel().drop();
-}
-
-doParallel = function( work ) {
- resetParallel();
- startMongoProgramNoConnect( "mongo", "--port", ports[ 1 ], "--eval", work + "; db.parallelStatus.save( {done:1} );", baseName );
-}
-
-doneParallel = function() {
- return !!parallel().findOne();
-}
-
-waitParallel = function() {
- assert.soon( function() { return doneParallel(); }, "parallel did not finish in time", 300000, 1000 );
-}
-
-cloneNo = -1;
-startstartclone = function( spec ) {
- spec = spec || "";
- cloneNo++;
- doParallel( "z = db.runCommand( {startCloneCollection:\"jstests_clonecollection.a\", from:\"localhost:" + ports[ 0 ] + "\"" + spec + " } ); print( \"clone_clone_clone_commandResult::" + cloneNo + "::\" + tojson( z , '' , true ) + \":::::\" );" );
-}
-
-finishstartclone = function() {
- waitParallel();
- // even after parallel shell finished, must wait for finishToken line to appear in log
- assert.soon( function() {
- raw = rawMongoProgramOutput().replace( /[\r\n]/gm , " " )
- ret = raw.match( new RegExp( "clone_clone_clone_commandResult::" + cloneNo + "::(.*):::::" ) );
- if ( ret == null ) {
- return false;
- }
- ret = ret[ 1 ];
- return true;
- } );
-
- eval( "ret = " + ret );
-
- assert.commandWorked( ret );
- return ret;
-}
-
-dofinishclonecmd = function( ret ) {
- finishToken = ret.finishToken;
- // Round-tripping through JS can corrupt the cursor ids we store as BSON
- // Date elements. Date( 0 ) will correspond to a cursorId value of 0, which
- // makes the db start scanning from the beginning of the collection.
- finishToken.cursorId = new Date( 0 );
- return t.runCommand( {finishCloneCollection:finishToken} );
-}
-
-finishclone = function( ret ) {
- assert.commandWorked( dofinishclonecmd( ret ) );
-}
ports = allocatePorts( 2 );
@@ -91,7 +32,9 @@ if ( t.system.indexes.find().count() != 2 ) {
}
assert.eq( 2, t.system.indexes.find().count(), "expected index missing" );
// Verify index works
-assert.eq( 50, t.a.find( { i: 50 } ).hint( { i: 1 } ).explain().indexBounds[0][0].i , "verify 1" );
+x = t.a.find( { i: 50 } ).hint( { i: 1 } ).explain()
+printjson( x )
+assert.eq( 50, x.indexBounds.i[0][0] , "verify 1" );
assert.eq( 1, t.a.find( { i: 50 } ).hint( { i: 1 } ).toArray().length, "match length did not match expected" );
// Check that capped-ness is preserved on clone
@@ -103,91 +46,4 @@ assert( f.a.isCapped() );
assert.commandWorked( t.cloneCollection( "localhost:" + ports[ 0 ], "a" ) );
assert( t.a.isCapped(), "cloned collection not capped" );
-// Now test insert + delete + update during clone
-f.a.drop();
-t.a.drop();
-
-for( i = 0; i < 100000; ++i ) {
- f.a.save( { i: i } );
-}
-assert.eq( 100000, f.a.count() );
-
-startstartclone( ", query:{i:{$gte:0}}" );
-
-sleep( 200 );
-f.a.save( { i: 200000 } );
-f.a.save( { i: -1 } );
-f.a.remove( { i: 0 } );
-f.a.update( { i: 99998 }, { i: 99998, x: "y" } );
-assert.eq( 100001, f.a.count() , "D0" );
-ret = finishstartclone();
-finishclone( ret );
-
-assert.eq( 100000, t.a.find().count() , "D1" );
-assert.eq( 1, t.a.find( { i: 200000 } ).count() , "D2" );
-assert.eq( 0, t.a.find( { i: -1 } ).count() , "D3" );
-assert.eq( 0, t.a.find( { i: 0 } ).count() , "D4" );
-assert.eq( 1, t.a.find( { i: 99998, x: "y" } ).count() , "D5" );
-
-
-// Now test oplog running out of space -- specify small size clone oplog for test.
-f.a.drop();
-t.a.drop();
-
-for( i = 0; i < 200000; ++i ) {
- f.a.save( { i: i } );
-}
-assert.eq( 200000, f.a.count() , "E1" );
-
-startstartclone( ", logSizeMb:1" );
-ret = finishstartclone();
-
-for( i = 200000; i < 250000; ++i ) {
- f.a.save( { i: i } );
-}
-
-assert.eq( 250000, f.a.count() , "F0" );
-
-assert.commandFailed( dofinishclonecmd( ret ) );
-
-// Make sure the same works with standard size op log.
-f.a.drop();
-t.a.drop();
-
-for( i = 0; i < 200000; ++i ) {
- f.a.save( { i: i } );
-}
-assert.eq( 200000, f.a.count() , "F1" );
-
-startstartclone();
-ret = finishstartclone();
-
-for( i = 200000; i < 250000; ++i ) {
- f.a.save( { i: i } );
-}
-assert.eq( 250000, f.a.count() , "F2" );
-
-finishclone( ret );
-assert.eq( 250000, t.a.find().count() , "F3" );
-
-// Test startCloneCollection and finishCloneCollection commands.
-f.a.drop();
-t.a.drop();
-
-for( i = 0; i < 100000; ++i ) {
- f.a.save( { i: i } );
-}
-assert.eq( 100000, f.a.count() , "G1" );
-
-startstartclone();
-
-sleep( 200 );
-f.a.save( { i: -1 } );
-
-ret = finishstartclone();
-assert.eq( 100001, t.a.find().count() , "G2" );
-f.a.save( { i: -2 } );
-assert.eq( 100002, f.a.find().count() , "G3" );
-finishclone( ret );
-assert.eq( 100002, t.a.find().count() , "G4" );
diff --git a/jstests/conc_update.js b/jstests/conc_update.js
new file mode 100644
index 0000000..ac70861
--- /dev/null
+++ b/jstests/conc_update.js
@@ -0,0 +1,45 @@
+// db = db.getSisterDB("concurrency")
+// db.dropDatabase();
+//
+// NRECORDS=10*1024*1024 // this needs to be relatively big so that
+// // the update() will take a while.
+//
+// print("loading data (will take a while; progress msg every 1024*1024 documents)")
+// for (i=0; i<(10*1024*1024); i++) {
+// db.conc.insert({x:i})
+// if ((i%(1024*1024))==0)
+// print("loaded " + i/(1024*1024) + " mibi-records")
+// }
+//
+// print("making an index (will take a while)")
+// db.conc.ensureIndex({x:1})
+//
+// var c1=db.conc.count({x:{$lt:NRECORDS}})
+// // this is just a flag that the child will toggle when it's done.
+// db.concflag.update({}, {inprog:true}, true)
+//
+// updater=startParallelShell("db=db.getSisterDB('concurrency');\
+// db.conc.update({}, {$inc:{x: "+NRECORDS+"}}, false, true);\
+// print(db.getLastError());\
+// db.concflag.update({},{inprog:false})");
+//
+// querycount=0;
+// decrements=0;
+// misses=0
+// while (1) {
+// if (db.concflag.findOne().inprog) {
+// c2=db.conc.count({x:{$lt:10*1024*1024}})
+// print(c2)
+// querycount++;
+// if (c2<c1)
+// decrements++;
+// else
+// misses++;
+// c1 = c2;
+// } else
+// break;
+// sleep(10);
+// }
+// print(querycount + " queries, " + decrements + " decrements, " + misses + " misses");
+//
+// updater() // wait()
diff --git a/jstests/copydb2.js b/jstests/copydb-auth.js
index 90ef943..90ef943 100644
--- a/jstests/copydb2.js
+++ b/jstests/copydb-auth.js
diff --git a/jstests/cursor8.js b/jstests/cursor8.js
index 5ebd4f5..b50fe3b 100644
--- a/jstests/cursor8.js
+++ b/jstests/cursor8.js
@@ -1,9 +1,13 @@
-t = db.f
+// This should get skipped when testing replication.
+
+t = db.cursor8;
t.drop();
t.save( {} );
t.save( {} );
t.save( {} );
+assert.eq( 3 , t.find().count() , "A0" );
+
db.getMongo().getDB( "admin" ).runCommand( {closeAllDatabases:1} );
function test( want , msg ){
@@ -12,8 +16,8 @@ function test( want , msg ){
}
test( 0 , "A1" );
-assert.eq( 3 , t.find().count() , "A1" );
-assert.eq( 3 , t.find( {} ).count() , "A2" );
-assert.eq( 2, t.find( {} ).limit( 2 ).itcount() , "A3" );
+assert.eq( 3 , t.find().count() , "A2" );
+assert.eq( 3 , t.find( {} ).count() , "A3" );
+assert.eq( 2, t.find( {} ).limit( 2 ).itcount() , "A4" );
test( 1 , "B1" );
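cursor8.js now verifies counts both before and after closeAllDatabases; the admin command closes every open database, and the subsequent find() reopens test on demand. A sketch of the pattern:

    db.getMongo().getDB( "admin" ).runCommand( { closeAllDatabases : 1 } );
    assert.eq( 3, db.cursor8.find().count() );   // the database is reopened transparently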
diff --git a/jstests/cursora.js b/jstests/cursora.js
new file mode 100644
index 0000000..0916fa7
--- /dev/null
+++ b/jstests/cursora.js
@@ -0,0 +1,34 @@
+
+t = db.cursora
+
+
+
+function run( n , atomic ){
+
+ t.drop()
+
+ for ( i=0; i<n; i++ )
+ t.insert( { _id : i } )
+ db.getLastError()
+
+ join = startParallelShell( "sleep(50); db.cursora.remove( {" + ( atomic ? "$atomic:true" : "" ) + "} ); db.getLastError();" );
+
+ start = new Date()
+ num = t.find( function(){ num = 2; for ( var x=0; x<1000; x++ ) num += 2; return num > 0; } ).sort( { _id : -1 } ).limit(n).itcount()
+ end = new Date()
+
+ join()
+
+ print( "num: " + num + " time:" + ( end.getTime() - start.getTime() ) )
+ assert.eq( 0 , t.count() , "after remove" )
+ if ( n == num )
+ print( "warning: shouldn't have counted all n: " + n + " num: " + num );
+}
+
+run( 1500 )
+run( 5000 )
+
+run( 1500 , true )
+run( 5000 , true )
+
+
diff --git a/jstests/datasize.js b/jstests/datasize.js
index 396d24d..277efac 100644
--- a/jstests/datasize.js
+++ b/jstests/datasize.js
@@ -5,7 +5,7 @@ assert.eq( 0, db.runCommand( {datasize:"test.jstests_datasize"} ).size );
f.save( {qq:'c'} );
assert.eq( 32, db.runCommand( {datasize:"test.jstests_datasize"} ).size );
f.save( {qq:'fg'} );
-assert.eq( 65, db.runCommand( {datasize:"test.jstests_datasize"} ).size );
+assert.eq( 68, db.runCommand( {datasize:"test.jstests_datasize"} ).size );
f.drop();
f.ensureIndex( {qq:1} );
@@ -13,14 +13,14 @@ assert.eq( 0, db.runCommand( {datasize:"test.jstests_datasize"} ).size );
f.save( {qq:'c'} );
assert.eq( 32, db.runCommand( {datasize:"test.jstests_datasize"} ).size );
f.save( {qq:'fg'} );
-assert.eq( 65, db.runCommand( {datasize:"test.jstests_datasize"} ).size );
+assert.eq( 68, db.runCommand( {datasize:"test.jstests_datasize"} ).size );
assert.eq( 0, db.runCommand( {datasize:"test.jstests_datasize", min:{qq:'a'}} ).ok );
-assert.eq( 65, db.runCommand( {datasize:"test.jstests_datasize", min:{qq:'a'}, max:{qq:'z' }} ).size );
+assert.eq( 68, db.runCommand( {datasize:"test.jstests_datasize", min:{qq:'a'}, max:{qq:'z' }} ).size );
assert.eq( 32, db.runCommand( {datasize:"test.jstests_datasize", min:{qq:'a'}, max:{qq:'d' }} ).size );
assert.eq( 32, db.runCommand( {datasize:"test.jstests_datasize", min:{qq:'a'}, max:{qq:'d' }, keyPattern:{qq:1}} ).size );
-assert.eq( 33, db.runCommand( {datasize:"test.jstests_datasize", min:{qq:'d'}, max:{qq:'z' }, keyPattern:{qq:1}} ).size );
+assert.eq( 36, db.runCommand( {datasize:"test.jstests_datasize", min:{qq:'d'}, max:{qq:'z' }, keyPattern:{qq:1}} ).size );
assert.eq( 0, db.runCommand( {datasize:"test.jstests_datasize", min:{qq:'c'}, max:{qq:'c' }} ).size );
assert.eq( 32, db.runCommand( {datasize:"test.jstests_datasize", min:{qq:'c'}, max:{qq:'d' }} ).size );
diff --git a/jstests/datasize2.js b/jstests/datasize2.js
new file mode 100644
index 0000000..103cb20
--- /dev/null
+++ b/jstests/datasize2.js
@@ -0,0 +1,27 @@
+
+t = db.datasize2
+t.drop();
+
+N = 1000
+for ( i=0; i<N; i++ ){
+ t.insert( { _id : i , s : "asdasdasdasdasdasdasd" } );
+}
+
+c = { dataSize : "test.datasize2" ,
+ "keyPattern" : {
+ "_id" : 1
+ },
+ "min" : {
+ "_id" : 0
+ },
+ "max" : {
+ "_id" : N
+ }
+ };
+
+
+assert.eq( N , db.runCommand( c ).numObjects , "A" )
+
+c.maxObjects = 100;
+assert.eq( 101 , db.runCommand( c ).numObjects , "B" )
+
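datasize2.js exercises the dataSize command restricted to a key range; maxObjects caps the scan, and when the cap is reached numObjects reports maxObjects + 1 (hence the 101 above). A sketch:

    var cmd = { dataSize : "test.datasize2",
                keyPattern : { _id : 1 }, min : { _id : 0 }, max : { _id : 1000 } };
    printjson( db.runCommand( cmd ).numObjects );   // 1000: the full range is scanned
    cmd.maxObjects = 100;
    printjson( db.runCommand( cmd ).numObjects );   // 101: the scan stopped at the cap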
diff --git a/jstests/dbadmin.js b/jstests/dbadmin.js
index 8d0e7d1..8ea0426 100644
--- a/jstests/dbadmin.js
+++ b/jstests/dbadmin.js
@@ -16,6 +16,9 @@ else {
t.save( { x : 1 } );
res = db._adminCommand( "listDatabases" );
-assert( res.databases.length > 0 , "listDatabases 1" );
+assert( res.databases && res.databases.length > 0 , "listDatabases 1 " + tojson(res) );
+
+x = db._adminCommand( "ismaster" );
+assert( x.ismaster , "ismaster failed: " + tojson( x ) )
// TODO: add more tests here
diff --git a/jstests/dbcase.js b/jstests/dbcase.js
new file mode 100644
index 0000000..bf0c8e6
--- /dev/null
+++ b/jstests/dbcase.js
@@ -0,0 +1,23 @@
+
+a = db.getSisterDB( "test_dbnamea" )
+b = db.getSisterDB( "test_dbnameA" )
+
+a.dropDatabase();
+b.dropDatabase();
+
+a.foo.save( { x : 1 } )
+z = db.getLastErrorObj();
+assert.eq( 0 , z.code || 0 , "A : " + tojson(z) )
+
+b.foo.save( { x : 1 } )
+z = db.getLastErrorObj();
+assert.eq( 13297 , z.code || 0 , "B : " + tojson(z) )
+
+print( db.getMongo().getDBNames() )
+
+a.dropDatabase();
+b.dropDatabase();
+
+print( db.getMongo().getDBNames() )
+
+
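dbcase.js pins down the new rule that two databases whose names differ only by case cannot coexist; the second write fails with error code 13297, visible through getLastErrorObj. A sketch:

    db.getSisterDB( "test_dbnamea" ).foo.save( { x : 1 } );
    db.getSisterDB( "test_dbnameA" ).foo.save( { x : 1 } );
    var err = db.getSisterDB( "test_dbnameA" ).getLastErrorObj();
    assert.eq( 13297, err.code || 0 );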
diff --git a/jstests/dbhash.js b/jstests/dbhash.js
index 101be18..e9cbc94 100644
--- a/jstests/dbhash.js
+++ b/jstests/dbhash.js
@@ -5,6 +5,15 @@ b = db.dbhashb;
a.drop();
b.drop();
+// debug SERVER-761
+db.getCollectionNames().forEach( function( x ) {
+ v = db[ x ].validate();
+ if ( !v.valid ) {
+ print( x );
+ printjson( v );
+ }
+ } );
+
function gh( coll , mydb ){
if ( ! mydb ) mydb = db;
var x = mydb.runCommand( "dbhash" ).collections[coll.getName()];
diff --git a/jstests/disk/directoryperdb.js b/jstests/disk/directoryperdb.js
index a5fd18e..90a1f03 100644
--- a/jstests/disk/directoryperdb.js
+++ b/jstests/disk/directoryperdb.js
@@ -6,7 +6,7 @@ dbpath = "/data/db/" + baseDir + "/";
var m = startMongod( "--directoryperdb", "--port", port, "--dbpath", dbpath, "--nohttpinterface", "--bind_ip", "127.0.0.1" );
db = m.getDB( baseName );
db[ baseName ].save( {} );
-assert.eq( 1, db[ baseName ].count() );
+assert.eq( 1, db[ baseName ].count() , "A : " + tojson( db[baseName].find().toArray() ) );
checkDir = function( dir ) {
db.runCommand( {fsync:1} );
@@ -22,7 +22,7 @@ checkDir = function( dir ) {
files = listFiles( dir + baseName );
for( f in files ) {
- assert( new RegExp( baseName + "/" + baseName + "." ).test( files[ f ].name ) );
+ assert( new RegExp( baseName + "/" + baseName + "." ).test( files[ f ].name ) , "B dir:" + dir + " f: " + f );
}
}
checkDir( dbpath );
@@ -40,7 +40,7 @@ for( f in files ) {
}
}
checkDir( backupDir );
-assert.eq( 1, db[ baseName ].count() );
+assert.eq( 1, db[ baseName ].count() , "C" );
// tool test
stopMongod( port );
@@ -53,7 +53,7 @@ runMongoProgram( "mongorestore", "--dbpath", dbpath, "--directoryperdb", "--dir"
m = startMongoProgram( "mongod", "--directoryperdb", "--port", port, "--dbpath", dbpath, "--nohttpinterface", "--bind_ip", "127.0.0.1" );
db = m.getDB( baseName );
checkDir( dbpath );
-assert.eq( 1, db[ baseName ].count() );
+assert.eq( 1, db[ baseName ].count() , "C" );
assert( m.getDBs().totalSize > 0, "bad size calc" );
// drop db test
diff --git a/jstests/disk/repair.js b/jstests/disk/repair.js
index 6c8d81b..1308beb 100644
--- a/jstests/disk/repair.js
+++ b/jstests/disk/repair.js
@@ -1,3 +1,5 @@
+// check --repairpath and --repair
+
var baseName = "jstests_disk_repair";
port = allocatePorts( 1 )[ 0 ];
@@ -10,9 +12,36 @@ resetDbpath( repairpath );
m = startMongoProgram( "mongod", "--port", port, "--dbpath", dbpath, "--repairpath", repairpath, "--nohttpinterface", "--bind_ip", "127.0.0.1" );
db = m.getDB( baseName );
db[ baseName ].save( {} );
-db.runCommand( {repairDatabase:1, backupOriginalFiles:true} );
+assert.commandWorked( db.runCommand( {repairDatabase:1, backupOriginalFiles:true} ) );
+function check() {
+ files = listFiles( dbpath );
+ for( f in files ) {
+ assert( ! new RegExp( "^" + dbpath + "backup_" ).test( files[ f ].name ), "backup dir in dbpath" );
+ }
-files = listFiles( dbpath );
-for( f in files ) {
- assert( ! new RegExp( "^" + dbpath + "backup_" ).test( files[ f ].name ), "backup dir in dbpath" );
+ assert.eq.automsg( "1", "db[ baseName ].count()" );
}
+check();
+stopMongod( port );
+
+resetDbpath( repairpath );
+m = startMongoProgram( "mongod", "--port", port, "--dbpath", dbpath, "--nohttpinterface", "--bind_ip", "127.0.0.1" );
+db = m.getDB( baseName );
+assert.commandWorked( db.runCommand( {repairDatabase:1} ) );
+check();
+stopMongod( port );
+
+resetDbpath( repairpath );
+rc = runMongoProgram( "mongod", "--repair", "--port", port, "--dbpath", dbpath, "--repairpath", repairpath, "--nohttpinterface", "--bind_ip", "127.0.0.1" );
+assert.eq.automsg( "0", "rc" );
+m = startMongoProgram( "mongod", "--port", port, "--dbpath", dbpath, "--repairpath", repairpath, "--nohttpinterface", "--bind_ip", "127.0.0.1" );
+db = m.getDB( baseName );
+check();
+stopMongod( port );
+
+resetDbpath( repairpath );
+rc = runMongoProgram( "mongod", "--repair", "--port", port, "--dbpath", dbpath, "--nohttpinterface", "--bind_ip", "127.0.0.1" );
+assert.eq.automsg( "0", "rc" );
+m = startMongoProgram( "mongod", "--port", port, "--dbpath", dbpath, "--nohttpinterface", "--bind_ip", "127.0.0.1" );
+db = m.getDB( baseName );
+check();
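repair.js now covers both the repairDatabase command and the --repair startup flag, with and without --repairpath. The command form, as a sketch (backupOriginalFiles keeps the pre-repair files in a backup_ directory under the repair path):

    assert.commandWorked( db.runCommand( { repairDatabase : 1, backupOriginalFiles : true } ) );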
diff --git a/jstests/disk/repair2.js b/jstests/disk/repair2.js
new file mode 100644
index 0000000..a28ef79
--- /dev/null
+++ b/jstests/disk/repair2.js
@@ -0,0 +1,47 @@
+// repair with --directoryperdb
+
+var baseName = "jstests_disk_repair2";
+
+port = allocatePorts( 1 )[ 0 ];
+dbpath = "/data/db/" + baseName + "/";
+repairpath = dbpath + "repairDir/"
+
+resetDbpath( dbpath );
+resetDbpath( repairpath );
+
+m = startMongoProgram( "mongod", "--directoryperdb", "--port", port, "--dbpath", dbpath, "--repairpath", repairpath, "--nohttpinterface", "--bind_ip", "127.0.0.1" );
+db = m.getDB( baseName );
+db[ baseName ].save( {} );
+assert.commandWorked( db.runCommand( {repairDatabase:1, backupOriginalFiles:true} ) );
+function check() {
+ files = listFiles( dbpath );
+ for( f in files ) {
+ assert( ! new RegExp( "^" + dbpath + "backup_" ).test( files[ f ].name ), "backup dir in dbpath" );
+ }
+
+ assert.eq.automsg( "1", "db[ baseName ].count()" );
+}
+check();
+stopMongod( port );
+
+resetDbpath( repairpath );
+m = startMongoProgram( "mongod", "--directoryperdb", "--port", port, "--dbpath", dbpath, "--nohttpinterface", "--bind_ip", "127.0.0.1" );
+db = m.getDB( baseName );
+assert.commandWorked( db.runCommand( {repairDatabase:1} ) );
+check();
+stopMongod( port );
+
+resetDbpath( repairpath );
+rc = runMongoProgram( "mongod", "--repair", "--directoryperdb", "--port", port, "--dbpath", dbpath, "--repairpath", repairpath, "--nohttpinterface", "--bind_ip", "127.0.0.1" );
+assert.eq.automsg( "0", "rc" );
+m = startMongoProgram( "mongod", "--directoryperdb", "--port", port, "--dbpath", dbpath, "--repairpath", repairpath, "--nohttpinterface", "--bind_ip", "127.0.0.1" );
+db = m.getDB( baseName );
+check();
+stopMongod( port );
+
+resetDbpath( repairpath );
+rc = runMongoProgram( "mongod", "--repair", "--directoryperdb", "--port", port, "--dbpath", dbpath, "--nohttpinterface", "--bind_ip", "127.0.0.1" );
+assert.eq.automsg( "0", "rc" );
+m = startMongoProgram( "mongod", "--directoryperdb", "--port", port, "--dbpath", dbpath, "--nohttpinterface", "--bind_ip", "127.0.0.1" );
+db = m.getDB( baseName );
+check();
diff --git a/jstests/disk/repair3.js b/jstests/disk/repair3.js
new file mode 100644
index 0000000..c986dce
--- /dev/null
+++ b/jstests/disk/repair3.js
@@ -0,0 +1,52 @@
+// test --repairpath on another partition
+
+var baseName = "jstests_disk_repair3";
+var repairbase = "/data/db/repairpartitiontest"
+var repairpath = repairbase + "/dir"
+
+doIt = false;
+files = listFiles( "/data/db" );
+for ( i in files ) {
+ if ( files[ i ].name == repairbase ) {
+ doIt = true;
+ }
+}
+
+if ( !doIt ) {
+ print( "path " + repairpath + " missing, skipping repair3 test" );
+ doIt = false;
+}
+
+if ( doIt ) {
+
+ port = allocatePorts( 1 )[ 0 ];
+ dbpath = "/data/db/" + baseName + "/";
+
+ resetDbpath( dbpath );
+ resetDbpath( repairpath );
+
+ m = startMongoProgram( "mongod", "--nssize", "8", "--noprealloc", "--smallfiles", "--port", port, "--dbpath", dbpath, "--repairpath", repairpath, "--nohttpinterface", "--bind_ip", "127.0.0.1" );
+ db = m.getDB( baseName );
+ db[ baseName ].save( {} );
+ assert.commandWorked( db.runCommand( {repairDatabase:1, backupOriginalFiles:false} ) );
+ function check() {
+ files = listFiles( dbpath );
+ for( f in files ) {
+ assert( ! new RegExp( "^" + dbpath + "backup_" ).test( files[ f ].name ), "backup dir in dbpath" );
+ }
+
+ assert.eq.automsg( "1", "db[ baseName ].count()" );
+ }
+
+ check();
+ stopMongod( port );
+
+ resetDbpath( repairpath );
+ rc = runMongoProgram( "mongod", "--nssize", "8", "--noprealloc", "--smallfiles", "--repair", "--port", port, "--dbpath", dbpath, "--repairpath", repairpath, "--nohttpinterface", "--bind_ip", "127.0.0.1" );
+ assert.eq.automsg( "0", "rc" );
+ m = startMongoProgram( "mongod", "--nssize", "8", "--noprealloc", "--smallfiles", "--port", port, "--dbpath", dbpath, "--repairpath", repairpath, "--nohttpinterface", "--bind_ip", "127.0.0.1" );
+ db = m.getDB( baseName );
+ check();
+ stopMongod( port );
+
+}
\ No newline at end of file
diff --git a/jstests/disk/repair4.js b/jstests/disk/repair4.js
new file mode 100644
index 0000000..64c0f37
--- /dev/null
+++ b/jstests/disk/repair4.js
@@ -0,0 +1,44 @@
+// test that disk space check happens on --repairpath partition
+
+var baseName = "jstests_disk_repair4";
+var smallbase = "/data/db/repairpartitiontest"
+var smallpath = smallbase + "/dir"
+
+doIt = false;
+files = listFiles( "/data/db" );
+for ( i in files ) {
+ if ( files[ i ].name == smallbase ) {
+ doIt = true;
+ }
+}
+
+if ( !doIt ) {
+ print( "path " + smallpath + " missing, skipping repair4 test" );
+ doIt = false;
+}
+
+if ( doIt ) {
+
+ port = allocatePorts( 1 )[ 0 ];
+ repairpath = "/data/db/" + baseName + "/";
+
+ resetDbpath( smallpath );
+ resetDbpath( repairpath );
+
+ m = startMongoProgram( "mongod", "--nssize", "8", "--noprealloc", "--smallfiles", "--port", port, "--dbpath", smallpath, "--repairpath", repairpath, "--nohttpinterface", "--bind_ip", "127.0.0.1" );
+ db = m.getDB( baseName );
+ db[ baseName ].save( {} );
+ assert.commandWorked( db.runCommand( {repairDatabase:1, backupOriginalFiles:true} ) );
+ function check() {
+ files = listFiles( smallpath );
+ for( f in files ) {
+ assert( ! new RegExp( "^" + smallpath + "backup_" ).test( files[ f ].name ), "backup dir in dbpath" );
+ }
+
+ assert.eq.automsg( "1", "db[ baseName ].count()" );
+ }
+
+ check();
+ stopMongod( port );
+
+}
\ No newline at end of file
diff --git a/jstests/distinct_array1.js b/jstests/distinct_array1.js
new file mode 100644
index 0000000..0d41b80
--- /dev/null
+++ b/jstests/distinct_array1.js
@@ -0,0 +1,24 @@
+t = db.distinct_array1;
+t.drop();
+
+t.save( { a : [1,2,3] } )
+t.save( { a : [2,3,4] } )
+t.save( { a : [3,4,5] } )
+t.save( { a : 9 } )
+
+
+res = t.distinct( "a" );
+assert.eq( "1,2,3,4,5,9" , res.toString() , "A1" );
+
+
+//t.drop();
+
+t.save( { a : [{b:"a"}, {b:"d"}] , c : 12 } );
+t.save( { a : [{b:"b"}, {b:"d"}] , c : 12 } );
+t.save( { a : [{b:"c"}, {b:"e"}] , c : 12 } );
+t.save( { a : [{b:"c"}, {b:"f"}] , c : 12 } );
+t.save( { a : [] , c : 12 } );
+t.save( { a : { b : "z"} , c : 12 } );
+
+res = t.distinct( "a.b" );
+assert.eq( "a,b,c,d,e,f,z" , res.toString() , "B1" );
diff --git a/jstests/distinct_speed1.js b/jstests/distinct_speed1.js
new file mode 100644
index 0000000..4cae5b0
--- /dev/null
+++ b/jstests/distinct_speed1.js
@@ -0,0 +1,26 @@
+
+t = db.distinct_speed1;
+
+t.drop();
+for ( var i=0; i<10000; i++ ){
+ t.save( { x : i % 10 } );
+}
+
+assert.eq( 10 , t.distinct("x").length , "A1" );
+
+function fast(){
+ t.find().explain().millis;
+}
+
+function slow(){
+ t.distinct("x");
+}
+
+for ( i=0; i<3; i++ ){
+ print( "it: " + Date.timeFunc( fast ) );
+ print( "di: " + Date.timeFunc( slow ) );
+}
+
+
+t.ensureIndex( { x : 1 } );
+t.distinct( "x" , { x : 5 } )
diff --git a/jstests/drop.js b/jstests/drop.js
index 1bd539e..e1ecf8d 100644
--- a/jstests/drop.js
+++ b/jstests/drop.js
@@ -13,7 +13,7 @@ assert.eq( 0, db.system.indexes.find( {ns:"test.jstests_drop"} ).count() , "D" )
f.resetIndexCache();
f.ensureIndex( {a:1} );
assert.eq( 2, db.system.indexes.find( {ns:"test.jstests_drop"} ).count() , "E" );
-assert.commandWorked( db.runCommand( {deleteIndexes:"jstests_drop",index:"*"} ) );
+assert.commandWorked( db.runCommand( {deleteIndexes:"jstests_drop",index:"*"} ), "delete indexes A" );
assert.eq( 1, db.system.indexes.find( {ns:"test.jstests_drop"} ).count() , "G" );
// make sure we can still use it
diff --git a/jstests/evalb.js b/jstests/evalb.js
index 3bc3db1..177930c 100644
--- a/jstests/evalb.js
+++ b/jstests/evalb.js
@@ -10,5 +10,8 @@ db.setProfilingLevel( 2 );
assert.eq( 3, db.eval( function(){ return db.evalb.findOne().x; } ) , "B" );
+o = db.system.profile.find().sort( { $natural : -1 } ).limit(1).next();
+assert( o.info.indexOf( "findOne().x" ) > 0 , "C : " + tojson( o ) )
+
db.setProfilingLevel( 0 );
diff --git a/jstests/explain2.js b/jstests/explain2.js
index 5a36552..4960e5a 100644
--- a/jstests/explain2.js
+++ b/jstests/explain2.js
@@ -19,7 +19,7 @@ q = { a : { $gt : 3 } }
go( q , 6 , 7 , 6 );
q.b = 5
-go( q , 1 , 6 , 1 );
+go( q , 1 , 1 , 1 );
delete q.b
q.c = 5
diff --git a/jstests/find_and_modify.js b/jstests/find_and_modify.js
index 5e10079..a80859a 100644
--- a/jstests/find_and_modify.js
+++ b/jstests/find_and_modify.js
@@ -33,6 +33,6 @@ assert.eq(out.priority, 1);
out = t.findAndModify({sort:{priority:1}, remove:1});
assert.eq(out.priority, 2);
-// return empty obj if no matches (drivers may handle this differently)
+// return null (was {} before 1.5.4) if no matches (drivers may handle this differently)
out = t.findAndModify({query:{no_such_field:1}, remove:1});
-assert.eq(out, {});
+assert.eq(out, null);
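The behavior change above means callers of findAndModify must null-check the result rather than expect an empty object. A sketch:

    var out = db.find_and_modify.findAndModify( { query : { no_such_field : 1 }, remove : true } );
    if ( out == null )
        print( "no matching document" );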
diff --git a/jstests/find_and_modify2.js b/jstests/find_and_modify2.js
new file mode 100644
index 0000000..108fc0f
--- /dev/null
+++ b/jstests/find_and_modify2.js
@@ -0,0 +1,10 @@
+t = db.find_and_modify2;
+t.drop();
+
+t.insert({_id:1, i:0, j:0});
+
+out = t.findAndModify({update: {$inc: {i:1}}, 'new': true, fields: {i:1}});
+assert.eq(out, {_id:1, i:1});
+
+out = t.findAndModify({update: {$inc: {i:1}}, fields: {i:0}});
+assert.eq(out, {_id:1, j:0});
diff --git a/jstests/find_and_modify3.js b/jstests/find_and_modify3.js
new file mode 100644
index 0000000..1d30204
--- /dev/null
+++ b/jstests/find_and_modify3.js
@@ -0,0 +1,21 @@
+t = db.find_and_modify3;
+t.drop();
+
+t.insert({_id:0, other:0, comments:[{i:0, j:0}, {i:1, j:1}]});
+t.insert({_id:1, other:1, comments:[{i:0, j:0}, {i:1, j:1}]}); // this is the only one that gets modded
+t.insert({_id:2, other:2, comments:[{i:0, j:0}, {i:1, j:1}]});
+
+orig0 = t.findOne({_id:0})
+orig2 = t.findOne({_id:2})
+
+out = t.findAndModify({query: {_id:1, 'comments.i':0}, update: {$set: {'comments.$.j':2}}, 'new': true});
+assert.eq(out.comments[0], {i:0, j:2});
+assert.eq(out.comments[1], {i:1, j:1});
+assert.eq(t.findOne({_id:0}), orig0);
+assert.eq(t.findOne({_id:2}), orig2);
+
+out = t.findAndModify({query: {other:1, 'comments.i':1}, update: {$set: {'comments.$.j':3}}, 'new': true});
+assert.eq(out.comments[0], {i:0, j:2});
+assert.eq(out.comments[1], {i:1, j:3});
+assert.eq(t.findOne({_id:0}), orig0);
+assert.eq(t.findOne({_id:2}), orig2);
diff --git a/jstests/find_and_modify4.js b/jstests/find_and_modify4.js
new file mode 100644
index 0000000..ad1fb36
--- /dev/null
+++ b/jstests/find_and_modify4.js
@@ -0,0 +1,55 @@
+t = db.find_and_modify4;
+t.drop();
+
+// this is the best way to build auto-increment
+function getNextVal(counterName){
+ var ret = t.findAndModify({
+ query: {_id: counterName},
+ update: {$inc: {val: 1}},
+ upsert: true,
+ 'new': true,
+ });
+ return ret.val;
+}
+
+assert.eq(getNextVal("a"), 1);
+assert.eq(getNextVal("a"), 2);
+assert.eq(getNextVal("a"), 3);
+assert.eq(getNextVal("z"), 1);
+assert.eq(getNextVal("z"), 2);
+assert.eq(getNextVal("a"), 4);
+
+t.drop();
+
+function helper(upsert){
+ return t.findAndModify({
+ query: {_id: "asdf"},
+ update: {$inc: {val: 1}},
+ upsert: upsert,
+ 'new': false // the default
+ });
+}
+
+// upsert:false so nothing there before and after
+assert.eq(helper(false), null);
+assert.eq(t.count(), 0);
+
+// upsert:false so nothing there before; something there after
+assert.eq(helper(true), {});
+assert.eq(t.count(), 1);
+assert.eq(helper(true), {_id: 'asdf', val: 1});
+assert.eq(helper(false), {_id: 'asdf', val: 2}); // upsert only matters when obj doesn't exist
+assert.eq(helper(true), {_id: 'asdf', val: 3});
+
+
+// _id created if not specified
+var out = t.findAndModify({
+ query: {a:1},
+ update: {$set: {b: 2}},
+ upsert: true,
+ 'new': true
+ });
+assert.neq(out._id, undefined);
+assert.eq(out.a, 1);
+assert.eq(out.b, 2);
+
diff --git a/jstests/fm4.js b/jstests/fm4.js
new file mode 100644
index 0000000..1ce947a
--- /dev/null
+++ b/jstests/fm4.js
@@ -0,0 +1,16 @@
+t = db.fm4
+t.drop();
+
+t.insert({_id:1, a:1, b:1});
+
+assert.eq( t.findOne({}, {_id:1}), {_id:1}, 1)
+assert.eq( t.findOne({}, {_id:0}), {a:1, b:1}, 2)
+
+assert.eq( t.findOne({}, {_id:1, a:1}), {_id:1, a:1}, 3)
+assert.eq( t.findOne({}, {_id:0, a:1}), {a:1}, 4)
+
+assert.eq( t.findOne({}, {_id:0, a:0}), {b:1}, 6)
+assert.eq( t.findOne({}, { a:0}), {_id:1, b:1}, 5)
+
+// not sure if we want to support this since it is the same as above
+//assert.eq( t.findOne({}, {_id:1, a:0}), {_id:1, b:1}, 5)
diff --git a/jstests/geo2.js b/jstests/geo2.js
index 6b1a1a2..ff4552b 100644
--- a/jstests/geo2.js
+++ b/jstests/geo2.js
@@ -43,6 +43,7 @@ printjson( t.find( { loc : { $near : [ 50 , 50 ] } } ).explain() )
assert.lt( 3 , a( t.find( { loc : { $near : [ 50 , 50 ] } } ).limit(50) ) , "C1" )
assert.gt( 3 , a( t.find( { loc : { $near : [ 50 , 50 , 3 ] } } ).limit(50) ) , "C2" )
+assert.gt( 3 , a( t.find( { loc : { $near : [ 50 , 50 ] , $maxDistance : 3 } } ).limit(50) ) , "C3" )
diff --git a/jstests/geo3.js b/jstests/geo3.js
index 6bf27f9..ea6b497 100644
--- a/jstests/geo3.js
+++ b/jstests/geo3.js
@@ -18,7 +18,7 @@ fast = db.runCommand( { geoNear : t.getName() , near : [ 50 , 50 ] , num : 10 }
slow = db.runCommand( { geoNear : t.getName() , near : [ 50 , 50 ] , num : 10 , start : "11" } );
-//printjson( slow.stats );
+printjson( slow.stats );
assert.lt( fast.stats.nscanned * 10 , slow.stats.nscanned , "A1" );
assert.lt( fast.stats.objectsLoaded , slow.stats.objectsLoaded , "A2" );
diff --git a/jstests/geo_box3.js b/jstests/geo_box3.js
new file mode 100644
index 0000000..8941f63
--- /dev/null
+++ b/jstests/geo_box3.js
@@ -0,0 +1,36 @@
+// How to construct a test to stress the flaw in SERVER-994:
+// construct an index, think up a bounding box inside the index that
+// doesn't include the center of the index, and put a point inside the
+// bounding box.
+
+// This is the bug reported in SERVER-994.
+t=db.geo_box3;
+t.drop();
+t.insert({ point : { x : -15000000, y : 10000000 } });
+t.ensureIndex( { point : "2d" } , { min : -21000000 , max : 21000000 } );
+var c=t.find({point: {"$within": {"$box": [[-20000000, 7000000], [0, 15000000]]} } });
+assert.eq(1, c.count(), "A1");
+
+// Same thing, modulo 1000000.
+t=db.geo_box3;
+t.drop();
+t.insert({ point : { x : -15, y : 10 } });
+t.ensureIndex( { point : "2d" } , { min : -21 , max : 21 } );
+var c=t.find({point: {"$within": {"$box": [[-20, 7], [0, 15]]} } });
+assert.eq(1, c.count(), "B1");
+
+// Two more examples, one where the index is centered at the origin,
+// one not.
+t=db.geo_box3;
+t.drop();
+t.insert({ point : { x : 1.0 , y : 1.0 } });
+t.ensureIndex( { point : "2d" } , { min : -2 , max : 2 } );
+var c=t.find({point: {"$within": {"$box": [[.1, .1], [1.99, 1.99]]} } });
+assert.eq(1, c.count(), "C1");
+
+t=db.geo_box3;
+t.drop();
+t.insert({ point : { x : 3.9 , y : 3.9 } });
+t.ensureIndex( { point : "2d" } , { min : 0 , max : 4 } );
+var c=t.find({point: {"$within": {"$box": [[2.05, 2.05], [3.99, 3.99]]} } });
+assert.eq(1, c.count(), "D1");
diff --git a/jstests/geo_circle2.js b/jstests/geo_circle2.js
new file mode 100644
index 0000000..0232490
--- /dev/null
+++ b/jstests/geo_circle2.js
@@ -0,0 +1,23 @@
+
+t = db.geo_circle2;
+t.drop();
+
+t.ensureIndex({loc : "2d", categories:1}, {"name":"placesIdx", "min": -100, "max": 100});
+
+t.insert({ "uid" : 368900 , "loc" : { "x" : -36 , "y" : -8} ,"categories" : [ "sports" , "hotel" , "restaurant"]});
+t.insert({ "uid" : 555344 , "loc" : { "x" : 13 , "y" : 29} ,"categories" : [ "sports" , "hotel"]});
+t.insert({ "uid" : 855878 , "loc" : { "x" : 38 , "y" : 30} ,"categories" : [ "sports" , "hotel"]});
+t.insert({ "uid" : 917347 , "loc" : { "x" : 15 , "y" : 46} ,"categories" : [ "hotel"]});
+t.insert({ "uid" : 647874 , "loc" : { "x" : 25 , "y" : 23} ,"categories" : [ "hotel" , "restaurant"]});
+t.insert({ "uid" : 518482 , "loc" : { "x" : 4 , "y" : 25} ,"categories" : [ ]});
+t.insert({ "uid" : 193466 , "loc" : { "x" : -39 , "y" : 22} ,"categories" : [ "sports" , "hotel"]});
+t.insert({ "uid" : 622442 , "loc" : { "x" : -24 , "y" : -46} ,"categories" : [ "hotel"]});
+t.insert({ "uid" : 297426 , "loc" : { "x" : 33 , "y" : -49} ,"categories" : [ "hotel"]});
+t.insert({ "uid" : 528464 , "loc" : { "x" : -43 , "y" : 48} ,"categories" : [ "restaurant"]});
+t.insert({ "uid" : 90579 , "loc" : { "x" : -4 , "y" : -23} ,"categories" : [ "restaurant"]});
+t.insert({ "uid" : 368895 , "loc" : { "x" : -8 , "y" : 14} ,"categories" : [ "sports" ]});
+t.insert({ "uid" : 355844 , "loc" : { "x" : 34 , "y" : -4} ,"categories" : [ "sports" , "hotel"]});
+
+
+assert.eq( 10 , t.find({ "loc" : { "$within" : { "$center" : [ { "x" : 0 ,"y" : 0} , 50]}} } ).itcount() , "A" );
+assert.eq( 6 , t.find({ "loc" : { "$within" : { "$center" : [ { "x" : 0 ,"y" : 0} , 50]}}, "categories" : "sports" } ).itcount() , "B" );
diff --git a/jstests/geo_circle3.js b/jstests/geo_circle3.js
new file mode 100644
index 0000000..2882b47
--- /dev/null
+++ b/jstests/geo_circle3.js
@@ -0,0 +1,28 @@
+// SERVER-848 and SERVER-1191.
+db.places.drop()
+
+n = 0;
+db.places.save({ "_id": n++, "loc" : { "x" : 4.9999, "y" : 52 } })
+db.places.save({ "_id": n++, "loc" : { "x" : 5, "y" : 52 } })
+db.places.save({ "_id": n++, "loc" : { "x" : 5.0001, "y" : 52 } })
+db.places.save({ "_id": n++, "loc" : { "x" : 5, "y" : 52.0001 } })
+db.places.save({ "_id": n++, "loc" : { "x" : 5, "y" : 51.9999 } })
+db.places.save({ "_id": n++, "loc" : { "x" : 4.9999, "y" : 52.0001 } })
+db.places.save({ "_id": n++, "loc" : { "x" : 5.0001, "y" : 52.0001 } })
+db.places.save({ "_id": n++, "loc" : { "x" : 4.9999, "y" : 51.9999 } })
+db.places.save({ "_id": n++, "loc" : { "x" : 5.0001, "y" : 51.9999 } })
+db.places.ensureIndex( { loc : "2d" } )
+radius=0.0001
+center=[5,52]
+//print(db.places.find({"loc" : {"$within" : {"$center" : [center, radius]}}}).count())
+// FIXME: we want an assert, e.g., that there be 5 answers in the find().
+db.places.find({"loc" : {"$within" : {"$center" : [center, radius]}}}).forEach(printjson);
+
+
+// the result:
+// { "_id" : ObjectId("4bb1f2f088df513435bcb4e1"), "loc" : { "x" : 5, "y" : 52 } }
+// { "_id" : ObjectId("4bb1f54383459c40223a8ae7"), "loc" : { "x" : 5, "y" : 51.9999 } }
+// { "_id" : ObjectId("4bb1f54583459c40223a8aeb"), "loc" : { "x" : 5.0001, "y" : 51.9999 } }
+// { "_id" : ObjectId("4bb1f2e588df513435bcb4e0"), "loc" : { "x" : 4.9999, "y" : 52 } }
+// { "_id" : ObjectId("4bb1f30888df513435bcb4e2"), "loc" : { "x" : 5.0001, "y" : 52 } }
+// { "_id" : ObjectId("4bb1f54383459c40223a8ae8"), "loc" : { "x" : 4.9999, "y" : 52.0001 } }
diff --git a/jstests/geo_circle4.js b/jstests/geo_circle4.js
new file mode 100644
index 0000000..9edd5a1
--- /dev/null
+++ b/jstests/geo_circle4.js
@@ -0,0 +1,24 @@
+// Reported as server-848.
+db.server848.drop();
+
+radius=0.0001;
+center=[5,52];
+
+db.server848.save({ "_id": 1, "loc" : { "x" : 4.9999, "y" : 52 } });
+db.server848.save({ "_id": 2, "loc" : { "x" : 5, "y" : 52 } });
+db.server848.save({ "_id": 3, "loc" : { "x" : 5.0001, "y" : 52 } });
+db.server848.save({ "_id": 4, "loc" : { "x" : 5, "y" : 52.0001 } });
+db.server848.save({ "_id": 5, "loc" : { "x" : 5, "y" : 51.9999 } });
+db.server848.save({ "_id": 6, "loc" : { "x" : 4.9999, "y" : 52.0001 } });
+db.server848.save({ "_id": 7, "loc" : { "x" : 5.0001, "y" : 52.0001 } });
+db.server848.save({ "_id": 8, "loc" : { "x" : 4.9999, "y" : 51.9999 } });
+db.server848.save({ "_id": 9, "loc" : { "x" : 5.0001, "y" : 51.9999 } });
+db.server848.ensureIndex( { loc : "2d" } );
+r=db.server848.find({"loc" : {"$within" : {"$center" : [center, radius]}}}, {_id:1});
+assert.eq(5, r.count(), "A1");
+// FIXME: surely code like this belongs in utils.js.
+a=r.toArray();
+x=[];
+for (k in a) { x.push(a[k]["_id"]) }
+x.sort()
+assert.eq([1,2,3,4,5], x, "B1");
diff --git a/jstests/geo_circle5.js b/jstests/geo_circle5.js
new file mode 100644
index 0000000..ed190e4
--- /dev/null
+++ b/jstests/geo_circle5.js
@@ -0,0 +1,28 @@
+// reported as server-1238.
+
+db.server1238.drop();
+db.server1238.remove()
+db.server1238.save({ loc: [ 5000000, 900000 ], id: 1})
+db.server1238.save({ loc: [ 5000000, 900000 ], id: 2})
+db.server1238.ensureIndex( { loc : "2d" } , { min : -21000000 , max : 21000000 } )
+db.server1238.save({ loc: [ 5000000, 900000 ], id: 3})
+db.server1238.save({ loc: [ 5000000, 900000 ], id: 4})
+
+c1=db.server1238.find({"loc" : {"$within" : {"$center" : [[5000000, 900000], 1.0]}}}).count()
+
+c2=db.server1238.find({"loc" : {"$within" : {"$center" : [[5000001, 900000], 5.0]}}}).count()
+
+
+assert.eq(4, c1, "A1");
+assert.eq(c1, c2, "B1");
+//print(db.server1238.find({"loc" : {"$within" : {"$center" : [[5000001, 900000], 5.0]}}}).toArray());
+// [
+// {
+// "_id" : ObjectId("4c173306f5d9d34a46cb7b11"),
+// "loc" : [
+// 5000000,
+// 900000
+// ],
+// "id" : 4
+// }
+// ]
\ No newline at end of file
diff --git a/jstests/geo_haystack1.js b/jstests/geo_haystack1.js
new file mode 100644
index 0000000..f4035ec
--- /dev/null
+++ b/jstests/geo_haystack1.js
@@ -0,0 +1,59 @@
+
+t = db.geo_haystack1
+t.drop()
+
+function distance( a , b ){
+ var x = a[0] - b[0];
+ var y = a[1] - b[1];
+ return Math.sqrt( ( x * x ) + ( y * y ) );
+}
+
+function distanceTotal( a , arr , f ){
+ var total = 0;
+ for ( var i=0; i<arr.length; i++ ){
+ total += distance( a , arr[i][f] );
+ }
+ return total;
+}
+
+queries = [
+ { near : [ 7 , 8 ] , maxDistance : 3 , search : { z : 3 } } ,
+]
+
+answers = queries.map( function(){ return { totalDistance : 0 , results : [] }; } )
+
+
+n = 0;
+for ( x=0; x<20; x++ ){
+ for ( y=0; y<20; y++ ){
+ t.insert( { _id : n , loc : [ x , y ] , z : n % 5 } );
+
+ for ( i=0; i<queries.length; i++ ){
+ var d = distance( queries[i].near , [ x , y ] )
+ if ( d > queries[i].maxDistance )
+ continue;
+ if ( queries[i].search.z != n % 5 )
+ continue;
+ answers[i].results.push( { _id : n , loc : [ x , y ]} )
+ answers[i].totalDistance += d;
+ }
+
+ n++;
+ }
+}
+
+t.ensureIndex( { loc : "geoHaystack" , z : 1 } , { bucketSize : .7 } );
+
+for ( i=0; i<queries.length; i++ ){
+ print( "---------" );
+ printjson( queries[i] );
+ res = t.runCommand( "geoSearch" , queries[i] )
+ print( "\t" + tojson( res.stats ) );
+ print( "\tshould have: " + answers[i].results.length + "\t actually got: " + res.stats.n );
+ assert.eq( answers[i].results.length , res.stats.n, "num:"+ i + " number matches" )
+ assert.eq( answers[i].totalDistance , distanceTotal( queries[i].near , res.results , "loc" ), "num:"+ i + " totalDistance" )
+ //printjson( res );
+ //printjson( answers[i].length );
+}
+
+
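geo_haystack1.js builds a geoHaystack index and validates geoSearch results against brute-force distances. The command takes an anchor point, a maxDistance, and an equality filter on the extra indexed field. A sketch:

    var res = db.geo_haystack1.runCommand( "geoSearch",
        { near : [ 7, 8 ], maxDistance : 3, search : { z : 3 } } );
    printjson( res.stats );   // stats.n is the number of matches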
diff --git a/jstests/geo_haystack2.js b/jstests/geo_haystack2.js
new file mode 100644
index 0000000..2e0eb57
--- /dev/null
+++ b/jstests/geo_haystack2.js
@@ -0,0 +1,60 @@
+
+t = db.geo_haystack2
+t.drop()
+
+function distance( a , b ){
+ var x = a[0] - b[0];
+ var y = a[1] - b[1];
+ return Math.sqrt( ( x * x ) + ( y * y ) );
+}
+
+function distanceTotal( a , arr , f ){
+ var total = 0;
+ for ( var i=0; i<arr.length; i++ ){
+ total += distance( a , arr[i][f] );
+ }
+ return total;
+}
+
+queries = [
+ { near : [ 7 , 8 ] , maxDistance : 3 , search : { z : 3 } } ,
+]
+
+answers = queries.map( function(){ return { totalDistance : 0 , results : [] }; } )
+
+
+n = 0;
+for ( x=0; x<20; x++ ){
+ for ( y=0; y<20; y++ ){
+ t.insert( { _id : n , loc : [ x , y ] , z : [ n % 10 , ( n + 5 ) % 10 ] } );
+
+ for ( i=0; i<queries.length; i++ ){
+ var d = distance( queries[i].near , [ x , y ] )
+ if ( d > queries[i].maxDistance )
+ continue;
+ if ( queries[i].search.z != n % 10 &&
+ queries[i].search.z != ( n + 5 ) % 10 )
+ continue;
+ answers[i].results.push( { _id : n , loc : [ x , y ] } )
+ answers[i].totalDistance += d;
+ }
+
+ n++;
+ }
+}
+
+t.ensureIndex( { loc : "geoHaystack" , z : 1 } , { bucketSize : .7 } );
+
+for ( i=0; i<queries.length; i++ ){
+ print( "---------" );
+ printjson( queries[i] );
+ res = t.runCommand( "geoSearch" , queries[i] )
+ print( "\t" + tojson( res.stats ) );
+ print( "\tshould have: " + answers[i].results.length + "\t actually got: " + res.stats.n );
+ assert.eq( answers[i].results.length , res.stats.n, "num:"+ i + " number matches" )
+ assert.eq( answers[i].totalDistance , distanceTotal( queries[i].near , res.results , "loc" ), "num:"+ i + " totalDistance" )
+ //printjson( res );
+ //printjson( answers[i].length );
+}
+
+
diff --git a/jstests/geod.js b/jstests/geod.js
new file mode 100644
index 0000000..6e45845
--- /dev/null
+++ b/jstests/geod.js
@@ -0,0 +1,14 @@
+var t=db.geod;
+t.drop()
+t.save( { loc: [0,0] } )
+t.save( { loc: [0.5,0] } )
+t.ensureIndex({loc:"2d"})
+// do a few geoNears with different maxDistances. The first iteration
+// should match no points in the dataset.
+dists = [.49, .51, 1.0]
+for (idx in dists){
+ b=db.runCommand({geoNear:"geod", near:[1,0], num:2, maxDistance:dists[idx]});
+ assert.eq(b.errmsg, undefined, "A"+idx);
+ l=b.results.length
+ assert.eq(l, idx, "B"+idx)
+}
diff --git a/jstests/geoe.js b/jstests/geoe.js
new file mode 100644
index 0000000..22feb83
--- /dev/null
+++ b/jstests/geoe.js
@@ -0,0 +1,32 @@
+// Was reported as SERVER-1283.
+// The problem seems to be that the index btrees are sometimes arranged such
+// that the initial search for a matching point in the geo code runs to the
+// end of the btree without reversing direction, leaving the rest of the
+// search stuck on a random non-matching point.
+
+t=db.geo_box;
+t.drop();
+
+t.insert({"_id": 1, "geo" : [ 33, -11.1 ] });
+t.insert({"_id": 2, "geo" : [ -122, 33.3 ] });
+t.insert({"_id": 3, "geo" : [ -122, 33.4 ] });
+t.insert({"_id": 4, "geo" : [ -122.28, 37.67 ] });
+t.insert({"_id": 5, "geo" : [ -122.29, 37.68 ] });
+t.insert({"_id": 6, "geo" : [ -122.29, 37.67 ] });
+t.insert({"_id": 7, "geo" : [ -122.29, 37.67 ] });
+t.insert({"_id": 8, "geo" : [ -122.29, 37.68 ] });
+t.insert({"_id": 9, "geo" : [ -122.29, 37.68 ] });
+t.insert({"_id": 10, "geo" : [ -122.3, 37.67 ] });
+t.insert({"_id": 11, "geo" : [ -122.31, 37.67 ] });
+t.insert({"_id": 12, "geo" : [ -122.3, 37.66 ] });
+t.insert({"_id": 13, "geo" : [ -122.2435, 37.637072 ] });
+t.insert({"_id": 14, "geo" : [ -122.289505, 37.695774 ] });
+
+
+t.ensureIndex({ geo : "2d" });
+
+c=t.find({geo: {"$within": {"$box": [[-125.078461,36.494473], [-120.320648,38.905199]]} } });
+assert.eq(11, c.count(), "A1");
+
+c=t.find({geo: {"$within": {"$box": [[-124.078461,36.494473], [-120.320648,38.905199]]} } });
+assert.eq(11, c.count(), "B1");
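+// Both boxes should return the same 11 points; the second, narrower box
+// re-exercises the btree traversal path described in the comment above.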
diff --git a/jstests/group6.js b/jstests/group6.js
new file mode 100644
index 0000000..8d738d4
--- /dev/null
+++ b/jstests/group6.js
@@ -0,0 +1,31 @@
+t = db.jstests_group6;
+t.drop();
+
+for( i = 1; i <= 10; ++i ) {
+ t.save( {i:new NumberLong( i ),y:1} );
+}
+
+assert.eq.automsg( "55", "t.group( {key:'y', reduce:function(doc,out){ out.i += doc.i; }, initial:{i:0} } )[ 0 ].i" );
+
+t.drop();
+for( i = 1; i <= 10; ++i ) {
+ if ( i % 2 == 0 ) {
+ t.save( {i:new NumberLong( i ),y:1} );
+ } else {
+ t.save( {i:i,y:1} );
+ }
+}
+
+assert.eq.automsg( "55", "t.group( {key:'y', reduce:function(doc,out){ out.i += doc.i; }, initial:{i:0} } )[ 0 ].i" );
+
+t.drop();
+for( i = 1; i <= 10; ++i ) {
+ if ( i % 2 == 1 ) {
+ t.save( {i:new NumberLong( i ),y:1} );
+ } else {
+ t.save( {i:i,y:1} );
+ }
+}
+
+assert.eq.automsg( "55", "t.group( {key:'y', reduce:function(doc,out){ out.i += doc.i; }, initial:{i:0} } )[ 0 ].i" );
+
diff --git a/jstests/hint1.js b/jstests/hint1.js
index c222aa3..63a5fa6 100644
--- a/jstests/hint1.js
+++ b/jstests/hint1.js
@@ -6,5 +6,5 @@ p.save( { ts: new Date( 1 ), cls: "entry", verticals: "alleyinsider", live: true
p.ensureIndex( { ts: 1 } );
e = p.find( { live: true, ts: { $lt: new Date( 1234119308272 ) }, cls: "entry", verticals: " alleyinsider" } ).sort( { ts: -1 } ).hint( { ts: 1 } ).explain();
-assert.eq( e.indexBounds[0][0].ts.getTime(), new Date( 1234119308272 ).getTime() , "A" );
-assert.eq( 0 , e.indexBounds[0][1].ts.getTime() , "B" );
+assert.eq( e.indexBounds.ts[0][0].getTime(), new Date( 1234119308272 ).getTime() , "A" );
+assert.eq( 0 , e.indexBounds.ts[0][1].getTime() , "B" );
diff --git a/jstests/in3.js b/jstests/in3.js
index 1ec53ca..305fb22 100644
--- a/jstests/in3.js
+++ b/jstests/in3.js
@@ -2,8 +2,8 @@ t = db.jstests_in3;
t.drop();
t.ensureIndex( {i:1} );
-assert.eq( [ [ {i:3}, {i:3} ] ], t.find( {i:{$in:[3]}} ).explain().indexBounds , "A1" );
-assert.eq( [ [ {i:3}, {i:3} ], [ {i:6}, {i:6} ] ], t.find( {i:{$in:[3,6]}} ).explain().indexBounds , "A2" );
+assert.eq( {i:[[3,3]]}, t.find( {i:{$in:[3]}} ).explain().indexBounds , "A1" );
+assert.eq( {i:[[3,3],[6,6]]}, t.find( {i:{$in:[3,6]}} ).explain().indexBounds , "A2" );
for ( var i=0; i<20; i++ )
t.insert( { i : i } );
diff --git a/jstests/in4.js b/jstests/in4.js
new file mode 100644
index 0000000..9aed608
--- /dev/null
+++ b/jstests/in4.js
@@ -0,0 +1,53 @@
+t = db.jstests_in4;
+
+function checkRanges( a, b ) {
+ assert.eq( a, b );
+// expectedCount = a;
+// r = b;
+//// printjson( r );
+// assert.eq.automsg( "expectedCount", "r.a.length" );
+// for( i in r.a ) {
+// assert.eq.automsg( "r.a[ i ][ 0 ]", "r.a[ i ][ 1 ]" );
+// }
+// assert.eq.automsg( "expectedCount", "r.b.length" );
+// for( i in r.b ) {
+// assert.eq.automsg( "r.b[ i ][ 0 ]", "r.b[ i ][ 1 ]" );
+// }
+}
+
+t.drop();
+t.ensureIndex( {a:1,b:1} );
+checkRanges( {a:[[2,2]],b:[[3,3]]}, t.find( {a:2,b:3} ).explain().indexBounds );
+checkRanges( {a:[[2,2],[3,3]],b:[[4,4]]}, t.find( {a:{$in:[2,3]},b:4} ).explain().indexBounds );
+checkRanges( {a:[[2,2]],b:[[3,3],[4,4]]}, t.find( {a:2,b:{$in:[3,4]}} ).explain().indexBounds );
+checkRanges( {a:[[2,2],[3,3]],b:[[4,4],[5,5]]}, t.find( {a:{$in:[2,3]},b:{$in:[4,5]}} ).explain().indexBounds );
+
+checkRanges( {a:[[2,2],[3,3]],b:[[4,10]]}, t.find( {a:{$in:[2,3]},b:{$gt:4,$lt:10}} ).explain().indexBounds );
+
+t.save( {a:1,b:1} );
+t.save( {a:2,b:4.5} );
+t.save( {a:2,b:4} );
+assert.eq.automsg( "1", "t.find( {a:{$in:[2,3]},b:{$in:[4,5]}} ).explain().nscanned" );
+assert.eq.automsg( "2", "t.findOne( {a:{$in:[2,3]},b:{$in:[4,5]}} ).a" );
+assert.eq.automsg( "4", "t.findOne( {a:{$in:[2,3]},b:{$in:[4,5]}} ).b" );
+
+t.drop();
+t.ensureIndex( {a:1,b:1,c:1} );
+checkRanges( {a:[[2,2]],b:[[3,3],[4,4]],c:[[5,5]]}, t.find( {a:2,b:{$in:[3,4]},c:5} ).explain().indexBounds );
+
+t.save( {a:2,b:3,c:5} );
+t.save( {a:2,b:3,c:4} );
+assert.eq.automsg( "1", "t.find( {a:2,b:{$in:[3,4]},c:5} ).explain().nscanned" );
+t.remove();
+t.save( {a:2,b:4,c:5} );
+t.save( {a:2,b:4,c:4} );
+assert.eq.automsg( "1", "t.find( {a:2,b:{$in:[3,4]},c:5} ).explain().nscanned" );
+
+t.drop();
+t.ensureIndex( {a:1,b:-1} );
+ib = t.find( {a:2,b:{$in:[3,4]}} ).explain().indexBounds;
+checkRanges( {a:[[2,2]],b:[[4,4],[3,3]]}, ib );
+assert.automsg( "ib.b[ 0 ][ 0 ] > ib.b[ 1 ][ 0 ]" );
+ib = t.find( {a:2,b:{$in:[3,4]}} ).sort( {a:-1,b:1} ).explain().indexBounds;
+checkRanges( {a:[[2,2]],b:[[3,3],[4,4]]}, ib );
+assert.automsg( "ib.b[ 0 ][ 0 ] < ib.b[ 1 ][ 0 ]" );
diff --git a/jstests/in5.js b/jstests/in5.js
new file mode 100644
index 0000000..435c886
--- /dev/null
+++ b/jstests/in5.js
@@ -0,0 +1,56 @@
+
+t = db.in5
+
+function go( fn ){
+ t.drop();
+ o = {};
+ o[fn] = { a : 1 , b : 2 };
+ t.insert( o );
+
+ x = {};
+ x[fn] = { a : 1 , b : 2 };
+ assert.eq( 1 , t.find( x ).itcount() , "A1 - " + fn );
+
+
+ y = {};
+ y[fn] = { $in : [ { a : 1 , b : 2 } ] }
+ assert.eq( 1 , t.find( y ).itcount() , "A2 - " + fn );
+
+
+ z = {};
+ z[fn+".a"] = 1;
+ z[fn+".b"] = { $in : [ 2 ] }
+ assert.eq( 1 , t.find( z ).itcount() , "A3 - " + fn ); // SERVER-1366
+
+
+ i = {}
+ i[fn] = 1
+ t.ensureIndex( i )
+
+ assert.eq( 1 , t.find( x ).itcount() , "B1 - " + fn );
+ assert.eq( 1 , t.find( y ).itcount() , "B2 - " + fn );
+ assert.eq( 1 , t.find( z ).itcount() , "B3 - " + fn ); // SERVER-1366
+
+ t.dropIndex( i )
+
+ assert.eq( 1 , t.getIndexes().length , "T2" );
+
+ i = {}
+ i[fn + ".a" ] = 1;
+ t.ensureIndex( i )
+ assert.eq( 2 , t.getIndexes().length , "T3" );
+
+ assert.eq( 1 , t.find( x ).itcount() , "C1 - " + fn );
+ assert.eq( 1 , t.find( y ).itcount() , "C2 - " + fn );
+ assert.eq( 1 , t.find( z ).itcount() , "C3 - " + fn ); // SERVER-1366
+
+ t.dropIndex( i )
+
+
+}
+
+go( "x" );
+go( "_id" )
+
+
+
diff --git a/jstests/in6.js b/jstests/in6.js
new file mode 100644
index 0000000..f114d93
--- /dev/null
+++ b/jstests/in6.js
@@ -0,0 +1,13 @@
+t = db.jstests_in6;
+t.drop();
+
+t.save( {} );
+
+function doTest() {
+ assert.eq.automsg( "1", "t.count( {i:null} )" );
+ assert.eq.automsg( "1", "t.count( {i:{$in:[null]}} )" );
+}
+
+doTest();
+t.ensureIndex( {i:1} );
+doTest();
diff --git a/jstests/in7.js b/jstests/in7.js
new file mode 100644
index 0000000..212723d
--- /dev/null
+++ b/jstests/in7.js
@@ -0,0 +1,6 @@
+t = db.jstests_slow_in1;
+
+t.drop();
+t.ensureIndex( {a:1,b:1,c:1,d:1,e:1,f:1} );
+i = {$in:[ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 ]};
+assert.throws.automsg( function() { t.count( {a:i,b:i,c:i,d:i,e:i,f:i} ); } );
diff --git a/jstests/index1.js b/jstests/index1.js
index 620f8bb..9767d08 100644
--- a/jstests/index1.js
+++ b/jstests/index1.js
@@ -17,9 +17,9 @@ assert( t.findOne( { z : { a : 17 } } ) == null);
o = { name : "bar" , z : { a : 18 } };
t.save( o );
-assert( t.find().length() == 2 );
-assert( t.find().sort( { "z.a" : 1 } ).length() == 2 );
-assert( t.find().sort( { "z.a" : -1 } ).length() == 2 );
+assert.eq.automsg( "2", "t.find().length()" );
+assert.eq.automsg( "2", "t.find().sort( { 'z.a' : 1 } ).length()" );
+assert.eq.automsg( "2", "t.find().sort( { 'z.a' : -1 } ).length()" );
// We are planning to phase out this syntax.
assert( t.find().sort( { z : { a : 1 } } ).length() == 2 );
assert( t.find().sort( { z : { a: -1 } } ).length() == 2 );
diff --git a/jstests/index10.js b/jstests/index10.js
index c638264..92f5927 100644
--- a/jstests/index10.js
+++ b/jstests/index10.js
@@ -14,9 +14,9 @@ assert.eq( 5, t.count() );
t.dropIndexes();
t.ensureIndex( {i:1}, true );
err = db.getLastErrorObj();
-assert( err.err );
+assert( err.err , "err.err" );
assert.eq( 11000, err.code );
-assert.eq( 1, db.system.indexes.count( {ns:"test.jstests_index10" } ) ); // only id index
+assert( 1 == db.system.indexes.count( {ns:"test.jstests_index10" } ), "only id index" );
// t.dropIndexes();
ts = t.totalIndexSize();
diff --git a/jstests/index6.js b/jstests/index6.js
index 7514aca..8dbd8f7 100644
--- a/jstests/index6.js
+++ b/jstests/index6.js
@@ -1,6 +1,6 @@
// index6.js Test indexes on array subelements.
-r = db.ed.db.index5;
+r = db.ed.db.index6;
r.drop();
r.save( { comments : [ { name : "eliot", foo : 1 } ] } );
diff --git a/jstests/index7.js b/jstests/index7.js
index a3b88d5..9e3a6c6 100644
--- a/jstests/index7.js
+++ b/jstests/index7.js
@@ -9,12 +9,14 @@ function noIndex( q ) {
}
function start( k, q, rev) {
- var s = q.explain().indexBounds[rev?1:0][0];
+ var exp = q.explain().indexBounds;
+ var s = {a:exp.a[rev?1:0][0],b:exp.b[0][0]};
assert.eq( k.a, s.a );
assert.eq( k.b, s.b );
}
function end( k, q, rev) {
- var e = q.explain().indexBounds[rev?1:0][1];
+ var exp = q.explain().indexBounds
+ var e = {a:exp.a[rev?1:0][1],b:exp.b[0][1]};
assert.eq( k.a, e.a );
assert.eq( k.b, e.b );
}
@@ -33,12 +35,12 @@ noIndex( f.find( { a: 5 } ).sort( { a: 1 } ).hint( { $natural: 1 } ) );
f.drop();
f.ensureIndex( { a: 1, b: 1 } );
-assert.eq( 1, f.find( { a: 1 } ).hint( { a: 1, b: 1 } ).explain().indexBounds[0][0].a );
-assert.eq( 1, f.find( { a: 1 } ).hint( { a: 1, b: 1 } ).explain().indexBounds[0][1].a );
-assert.eq( 1, f.find( { a: 1, c: 1 } ).hint( { a: 1, b: 1 } ).explain().indexBounds[0][0].a );
-assert.eq( 1, f.find( { a: 1, c: 1 } ).hint( { a: 1, b: 1 } ).explain().indexBounds[0][1].a );
-assert.eq( null, f.find( { a: 1, c: 1 } ).hint( { a: 1, b: 1 } ).explain().indexBounds[0][0].c );
-assert.eq( null, f.find( { a: 1, c: 1 } ).hint( { a: 1, b: 1 } ).explain().indexBounds[0][1].c );
+assert.eq( 1, f.find( { a: 1 } ).hint( { a: 1, b: 1 } ).explain().indexBounds.a[0][0] );
+assert.eq( 1, f.find( { a: 1 } ).hint( { a: 1, b: 1 } ).explain().indexBounds.a[0][1] );
+assert.eq( 1, f.find( { a: 1, c: 1 } ).hint( { a: 1, b: 1 } ).explain().indexBounds.a[0][0] );
+assert.eq( 1, f.find( { a: 1, c: 1 } ).hint( { a: 1, b: 1 } ).explain().indexBounds.a[0][1] );
+assert.eq( null, f.find( { a: 1, c: 1 } ).hint( { a: 1, b: 1 } ).explain().indexBounds.c );
+assert.eq( null, f.find( { a: 1, c: 1 } ).hint( { a: 1, b: 1 } ).explain().indexBounds.c );
start( { a: "a", b: 1 }, f.find( { a: /^a/, b: 1 } ).hint( { a: 1, b: 1 } ) );
start( { a: "a", b: 1 }, f.find( { a: /^a/, b: 1 } ).sort( { a: 1, b: 1 } ).hint( { a: 1, b: 1 } ) );
diff --git a/jstests/index_check2.js b/jstests/index_check2.js
index a489fd6..eed3b8e 100644
--- a/jstests/index_check2.js
+++ b/jstests/index_check2.js
@@ -38,4 +38,4 @@ scanned3 = t.find(q3).explain().nscanned;
assert( scanned3 <= Math.max( scanned1 , scanned2 ) , "$all makes query optimizer not work well" );
exp3 = t.find( q3 ).explain();
-assert.eq( exp3.indexBounds[0][0], exp3.indexBounds[0][1], "$all range not a single key" );
+assert.eq( exp3.indexBounds.tags[0][0], exp3.indexBounds.tags[0][1], "$all range not a single key" );
diff --git a/jstests/index_check6.js b/jstests/index_check6.js
index 71e6420..240f4cf 100644
--- a/jstests/index_check6.js
+++ b/jstests/index_check6.js
@@ -12,6 +12,54 @@ for ( var age=10; age<50; age++ ){
assert.eq( 10 , t.find( { age : 30 } ).explain().nscanned , "A" );
assert.eq( 20 , t.find( { age : { $gte : 29 , $lte : 30 } } ).explain().nscanned , "B" );
+assert.eq( 12 , t.find( { age : { $gte : 25 , $lte : 30 }, rating: {$in: [0,9] } } ).explain().nscanned , "C1" );
+
+assert.eq( 2 , t.find( { age : { $gte : 29 , $lte : 30 } , rating : 5 } ).explain().nscanned , "C" ); // SERVER-371
+assert.eq( 4 , t.find( { age : { $gte : 29 , $lte : 30 } , rating : { $gte : 4 , $lte : 5 } } ).explain().nscanned , "D" ); // SERVER-371
+
+assert.eq.automsg( "2", "t.find( { age:30, rating:{ $gte:4, $lte:5} } ).explain().nscanned" );
+
+t.drop();
+
+for ( var a=1; a<10; a++ ){
+ for ( var b=0; b<10; b++ ){
+ for ( var c=0; c<10; c++ ) {
+ t.save( { a:a, b:b, c:c } );
+ }
+ }
+}
+
+function doTest( s ) {
+    sort = s;
+    assert.eq.automsg( "1", "t.find( { a:5, b:5, c:5 } ).sort( sort ).explain().nscanned" );
+    assert.eq.automsg( "2", "t.find( { a:5, b:5, c:{$gte:5,$lte:6} } ).sort( sort ).explain().nscanned" );
+    assert.eq.automsg( "1", "t.find( { a:5, b:5, c:{$gte:5.5,$lte:6} } ).sort( sort ).explain().nscanned" );
+    assert.eq.automsg( "1", "t.find( { a:5, b:5, c:{$gte:5,$lte:5.5} } ).sort( sort ).explain().nscanned" );
+    assert.eq.automsg( "3", "t.find( { a:5, b:5, c:{$gte:5,$lte:7} } ).sort( sort ).explain().nscanned" );
+    assert.eq.automsg( "2", "t.find( { a:5, b:{$gte:5,$lte:6}, c:5 } ).sort( sort ).explain().nscanned" );
+    assert.eq.automsg( "1", "t.find( { a:5, b:{$gte:5.5,$lte:6}, c:5 } ).sort( sort ).explain().nscanned" );
+    assert.eq.automsg( "1", "t.find( { a:5, b:{$gte:5,$lte:5.5}, c:5 } ).sort( sort ).explain().nscanned" );
+    assert.eq.automsg( "3", "t.find( { a:5, b:{$gte:5,$lte:7}, c:5 } ).sort( sort ).explain().nscanned" );
+    assert.eq.automsg( "2", "t.find( { a:{$gte:5,$lte:6}, b:5, c:5 } ).sort( sort ).explain().nscanned" );
+    assert.eq.automsg( "1", "t.find( { a:{$gte:5.5,$lte:6}, b:5, c:5 } ).sort( sort ).explain().nscanned" );
+    assert.eq.automsg( "1", "t.find( { a:{$gte:5,$lte:5.5}, b:5, c:5 } ).sort( sort ).explain().nscanned" );
+    assert.eq.automsg( "3", "t.find( { a:{$gte:5,$lte:7}, b:5, c:5 } ).sort( sort ).explain().nscanned" );
+    assert.eq.automsg( "4", "t.find( { a:{$gte:5,$lte:6}, b:5, c:{$gte:5,$lte:6} } ).sort( sort ).explain().nscanned" );
+    assert.eq.automsg( "2", "t.find( { a:{$gte:5.5,$lte:6}, b:5, c:{$gte:5,$lte:6} } ).sort( sort ).explain().nscanned" );
+    assert.eq.automsg( "4", "t.find( { a:5, b:{$gte:5,$lte:6}, c:{$gte:5,$lte:6} } ).sort( sort ).explain().nscanned" );
+    assert.eq.automsg( "4", "t.find( { a:{$gte:5,$lte:6}, b:{$gte:5,$lte:6}, c:5 } ).sort( sort ).explain().nscanned" );
+    assert.eq.automsg( "8", "t.find( { a:{$gte:5,$lte:6}, b:{$gte:5,$lte:6}, c:{$gte:5,$lte:6} } ).sort( sort ).explain().nscanned" );
+}
+
+for ( var a = -1; a <= 1; a += 2 ) {
+ for( var b = -1; b <= 1; b += 2 ) {
+ for( var c = -1; c <= 1; c += 2 ) {
+ t.dropIndexes();
+ var spec = {a:a,b:b,c:c};
+ t.ensureIndex( spec );
+ doTest( spec );
+ doTest( {a:-a,b:-b,c:-c} );
+ }
+ }
+}
-//assert.eq( 2 , t.find( { age : { $gte : 29 , $lte : 30 } , rating : 5 } ).explain().nscanned , "C" ); // SERVER-371
-//assert.eq( 4 , t.find( { age : { $gte : 29 , $lte : 30 } , rating : { $gte : 4 , $lte : 5 } } ).explain().nscanned , "D" ); // SERVER-371
diff --git a/jstests/index_check8.js b/jstests/index_check8.js
new file mode 100644
index 0000000..bc267df
--- /dev/null
+++ b/jstests/index_check8.js
@@ -0,0 +1,15 @@
+
+t = db.index_check8
+t.drop();
+
+t.insert( { a : 1 , b : 1 , c : 1 , d : 1 , e : 1 } )
+t.ensureIndex( { a : 1 , b : 1 , c : 1 } )
+t.ensureIndex( { a : 1 , b : 1 , d : 1 , e : 1 } )
+
+x = t.find( { a : 1 , b : 1 , d : 1 } ).sort( { e : 1 } ).explain()
+assert( ! x.scanAndOrder , "A : " + tojson( x ) )
+
+x = t.find( { a : 1 , b : 1 , c : 1 , d : 1 } ).sort( { e : 1 } ).explain()
+//assert( ! x.scanAndOrder , "B : " + tojson( x ) )
+
+
diff --git a/jstests/index_elemmatch1.js b/jstests/index_elemmatch1.js
new file mode 100644
index 0000000..425807a
--- /dev/null
+++ b/jstests/index_elemmatch1.js
@@ -0,0 +1,28 @@
+
+t = db.index_elemmatch1
+t.drop()
+
+x = 0
+y = 0
+for ( a=0; a<100; a++ ){
+ for ( b=0; b<100; b++ ){
+ t.insert( { a : a , b : b % 10 , arr : [ { x : x++ % 10 , y : y++ % 10 } ] } )
+ }
+}
+
+t.ensureIndex( { a : 1 , b : 1 } )
+t.ensureIndex( { "arr.x" : 1 , a : 1 } )
+
+assert.eq( 100 , t.find( { a : 55 } ).itcount() , "A1" );
+assert.eq( 10 , t.find( { a : 55 , b : 7 } ).itcount() , "A2" );
+
+q = { a : 55 , b : { $in : [ 1 , 5 , 8 ] } }
+assert.eq( 30 , t.find( q ).itcount() , "A3" )
+
+q.arr = { $elemMatch : { x : 5 , y : 5 } }
+assert.eq( 10 , t.find( q ).itcount() , "A4" )
+
+assert.eq( t.find(q).itcount() , t.find(q).explain().nscanned , "A5" )
+
+
+
diff --git a/jstests/index_many.js b/jstests/index_many.js
index 9960afa..46705a2 100644
--- a/jstests/index_many.js
+++ b/jstests/index_many.js
@@ -1,34 +1,51 @@
-t = db.many;
+/* test using lots of indexes on one collection */
-t.drop();
-db.many2.drop();
+t = db.many;
-t.save({x:9});
-t.save({x:19});
+function f() {
+
+ t.drop();
+ db.many2.drop();
+
+ t.save({ x: 9, y : 99 });
+ t.save({ x: 19, y : 99 });
+
+ x = 2;
+ while (x < 70) {
+ patt = {};
+ patt[x] = 1;
+ if (x == 20)
+ patt = { x: 1 };
+ if (x == 64)
+ patt = { y: 1 };
+ t.ensureIndex(patt);
+ x++;
+ }
+
+ // print( tojson(db.getLastErrorObj()) );
+ assert(db.getLastError(), "should have got an error 'too many indexes'");
+
+ // 64 is the limit currently
+ lim = t.getIndexes().length;
+ if (lim != 64) {
+ print("# of indexes should be 64 but is : " + lim);
+ return;
+ }
+ assert(lim == 64, "not 64 indexes");
+
+ assert(t.find({ x: 9 }).length() == 1, "b");
+ assert(t.find({ x: 9 }).explain().cursor.match(/Btree/), "not using index?");
+
+ assert(t.find({ y: 99 }).length() == 2, "y idx");
+ assert(t.find({ y: 99 }).explain().cursor.match(/Btree/), "not using y index?");
+
+ /* check that renamecollection remaps all the indexes right */
+ assert(t.renameCollection("many2").ok, "rename failed");
+ assert(t.find({ x: 9 }).length() == 0, "many2a");
+ assert(db.many2.find({ x: 9 }).length() == 1, "many2b");
+ assert(t.find({ y: 99 }).length() == 0, "many2c");
+ assert(db.many2.find({ y: 99 }).length() == 2, "many2d");
-x = 2;
-while( x < 60 ) {
- patt={};
- patt[x] = 1;
- if( x == 20 )
- patt = { x : 1 };
- t.ensureIndex(patt);
- x++;
}
-// print( tojson(db.getLastErrorObj()) );
-assert( db.getLastError(), "should have an error 'too many indexes'" );
-
-// 40 is the limit currently
-
-// print( t.getIndexes().length == 40, "40" );
-
-assert( t.getIndexes().length == 40, "40" );
-
-assert( t.find({x:9}).length() == 1, "b" ) ;
-
-t.renameCollection( "many2" );
-
-assert( t.find({x:9}).length() == 0, "c" ) ;
-
-assert( db.many2.find({x:9}).length() == 1, "d" ) ;
+f();
diff --git a/jstests/index_many2.js b/jstests/index_many2.js
new file mode 100644
index 0000000..3fca5f5
--- /dev/null
+++ b/jstests/index_many2.js
@@ -0,0 +1,29 @@
+
+t = db.index_many2;
+t.drop()
+
+t.save( { x : 1 } )
+
+assert.eq( 1 , t.getIndexKeys().length , "A1" )
+
+function make( n ){
+ var x = {}
+ x["x"+n] = 1;
+ return x;
+}
+
+for ( i=1; i<1000; i++ ){
+ t.ensureIndex( make(i) );
+}
+
+assert.eq( 64 , t.getIndexKeys().length , "A2" )
+
+
+num = t.getIndexKeys().length
+
+t.dropIndex( make(num-1) )
+assert.eq( num - 1 , t.getIndexKeys().length , "B0" )
+
+t.ensureIndex( { z : 1 } )
+assert.eq( num , t.getIndexKeys().length , "B1" )
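+// The server caps a collection at 64 indexes (including _id), so the extra
+// ensureIndex calls above are no-ops until dropIndex frees a slot.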
+
diff --git a/jstests/indexapi.js b/jstests/indexapi.js
index ae76ec7..1bbdf43 100644
--- a/jstests/indexapi.js
+++ b/jstests/indexapi.js
@@ -34,7 +34,7 @@ idx = t.getIndexes();
assert.eq( 2 , idx.length , "M1" );
assert.eq( key , idx[1].key , "M2" );
assert( idx[1].unique , "M3" );
-printjson( idx );
+//printjson( idx );
db.system.indexes.insert( { ns : "test" , key : { x : 1 } , name : "x" } );
assert( db.getLastError().indexOf( "invalid" ) >= 0 , "Z1" );
diff --git a/jstests/indexe.js b/jstests/indexe.js
index 3170757..c08d117 100644
--- a/jstests/indexe.js
+++ b/jstests/indexe.js
@@ -12,7 +12,7 @@ assert.eq( num , t.find().count() ,"A1" );
assert.eq( num , t.find( { a : "b" } ).count() , "B1" );
assert.eq( num , t.find( { a : "b" } ).itcount() , "C1" );
-t.ensureIndex( { a : "b" } );
+t.ensureIndex( { a : 1 } );
assert.eq( num , t.find().count() ,"A2" );
assert.eq( num , t.find().sort( { a : 1 } ).count() , "A2a" );
diff --git a/jstests/indexh.js b/jstests/indexh.js
new file mode 100644
index 0000000..c6aad18
--- /dev/null
+++ b/jstests/indexh.js
@@ -0,0 +1,34 @@
+// This should get skipped when testing replication
+
+t = db.jstests_indexh;
+
+function debug( t ) {
+ print( t );
+}
+
+// index extent freeing
+t.drop();
+t.save( {} );
+var s1 = db.stats().dataSize;
+debug( "s1: " + s1 );
+t.ensureIndex( {a:1} );
+var s2 = db.stats().dataSize;
+debug( "s2: " + s2 );
+assert.automsg( "s1 < s2" );
+t.dropIndex( {a:1} );
+var s3 = db.stats().dataSize;
+debug( "s3: " + s3 );
+assert.eq.automsg( "s1", "s3" );
+
+// index node freeing
+t.drop();
+t.ensureIndex( {a:1} );
+for( i = 'a'; i.length < 500; i += 'a' ) {
+ t.save( {a:i} );
+}
+var s4 = db.stats().indexSize;
+debug( "s4: " + s4 );
+t.remove( {} );
+var s5 = db.stats().indexSize;
+debug( "s5: " + s5 );
+assert.automsg( "s5 < s4" ); \ No newline at end of file
diff --git a/jstests/maxscan.js b/jstests/maxscan.js
new file mode 100644
index 0000000..c455efb
--- /dev/null
+++ b/jstests/maxscan.js
@@ -0,0 +1,14 @@
+
+t = db.maxscan;
+t.drop();
+
+N = 100;
+for ( i=0; i<N; i++ ){
+ t.insert( { _id : i , x : i % 10 } );
+}
+
+assert.eq( N , t.find().itcount() , "A" )
+assert.eq( 50 , t.find()._addSpecial( "$maxScan" , 50 ).itcount() , "B" )
+
+assert.eq( 10 , t.find( { x : 2 } ).itcount() , "C" )
+assert.eq( 5 , t.find( { x : 2 } )._addSpecial( "$maxScan" , 50 ).itcount() , "D" )
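+// $maxScan bounds the number of documents examined, not returned: x : 2
+// matches every 10th document, so scanning the first 50 yields 5 matches.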
diff --git a/jstests/not2.js b/jstests/not2.js
index 5d33baa..dcd4535 100644
--- a/jstests/not2.js
+++ b/jstests/not2.js
@@ -27,6 +27,7 @@ t.save( {i:"a"} );
t.save( {i:"b"} );
fail( {i:{$not:"a"}} );
+fail( {i:{$not:{$not:"a"}}} );
fail( {i:{$not:{$not:{$gt:"a"}}}} );
fail( {i:{$not:{$ref:"foo"}}} );
fail( {i:{$not:{}}} );
@@ -87,21 +88,21 @@ t.save( {i:"b"} );
t.ensureIndex( {i:1} );
indexed = function( query, min, max ) {
- exp = t.find( query ).explain();
+ exp = t.find( query ).explain( true );
// printjson( exp );
assert( exp.cursor.match( /Btree/ ), tojson( query ) );
assert( exp.allPlans.length == 1, tojson( query ) );
// just expecting one element per key
- for( i in exp.indexBounds[0][0] ) {
- assert.eq( exp.indexBounds[0][0][ i ], min );
+ for( i in exp.indexBounds ) {
+ assert.eq( exp.indexBounds[ i ][0][0], min );
}
- for( i in exp.indexBounds[0][1] ) {
- assert.eq( exp.indexBounds[0][1][ i ], max );
+ for( i in exp.indexBounds ) {
+ assert.eq( exp.indexBounds[ i ][0][1], max );
}
}
not = function( query ) {
- exp = t.find( query ).explain();
+ exp = t.find( query ).explain( true );
// printjson( exp );
assert( !exp.cursor.match( /Btree/ ), tojson( query ) );
assert( exp.allPlans.length == 1, tojson( query ) );
@@ -125,6 +126,8 @@ indexed( {i:{$not:{$lt:"b"}}}, "b", {} );
indexed( {i:{$lte:"b"}}, "", "b" );
indexed( {i:{$not:{$lte:"b"}}}, "b", {} );
+indexed( {i:{$not:{$lte:"b",$gte:"f"}}}, "b", "f" );
+
not( {i:{$not:{$all:["a"]}}} );
not( {i:{$not:{$mod:[2,1]}}} );
not( {i:{$not:{$type:2}}} );
diff --git a/jstests/numberlong.js b/jstests/numberlong.js
new file mode 100644
index 0000000..848ef87
--- /dev/null
+++ b/jstests/numberlong.js
@@ -0,0 +1,55 @@
+assert.eq.automsg( "0", "new NumberLong()" );
+
+n = new NumberLong( 4 );
+assert.eq.automsg( "4", "n" );
+assert.eq.automsg( "4", "n.toNumber()" );
+assert.eq.automsg( "8", "n + 4" );
+assert.eq.automsg( "'NumberLong( 4 )'", "n.toString()" );
+assert.eq.automsg( "'NumberLong( 4 )'", "tojson( n )" );
+a = {}
+a.a = n;
+p = tojson( a );
+assert.eq.automsg( "'{ \"a\" : NumberLong( 4 ) }'", "p" );
+
+assert.eq.automsg( "NumberLong( 4 )", "eval( tojson( NumberLong( 4 ) ) )" );
+assert.eq.automsg( "a", "eval( tojson( a ) )" );
+
+n = new NumberLong( -4 );
+assert.eq.automsg( "-4", "n" );
+assert.eq.automsg( "-4", "n.toNumber()" );
+assert.eq.automsg( "0", "n + 4" );
+assert.eq.automsg( "'NumberLong( -4 )'", "n.toString()" );
+assert.eq.automsg( "'NumberLong( -4 )'", "tojson( n )" );
+a = {}
+a.a = n;
+p = tojson( a );
+assert.eq.automsg( "'{ \"a\" : NumberLong( -4 ) }'", "p" );
+
+// too big to fit in double
+n = new NumberLong( "11111111111111111" );
+assert.eq.automsg( "11111111111111112", "n.toNumber()" );
+assert.eq.automsg( "11111111111111116", "n + 4" );
+assert.eq.automsg( "'NumberLong( \"11111111111111111\" )'", "n.toString()" );
+assert.eq.automsg( "'NumberLong( \"11111111111111111\" )'", "tojson( n )" );
+a = {}
+a.a = n;
+p = tojson( a );
+assert.eq.automsg( "'{ \"a\" : NumberLong( \"11111111111111111\" ) }'", "p" );
+
+assert.eq.automsg( "NumberLong( '11111111111111111' )", "eval( tojson( NumberLong( '11111111111111111' ) ) )" );
+assert.eq.automsg( "a", "eval( tojson( a ) )" );
+
+n = new NumberLong( "-11111111111111111" );
+assert.eq.automsg( "-11111111111111112", "n.toNumber()" );
+assert.eq.automsg( "-11111111111111108", "n + 4" );
+assert.eq.automsg( "'NumberLong( \"-11111111111111111\" )'", "n.toString()" );
+assert.eq.automsg( "'NumberLong( \"-11111111111111111\" )'", "tojson( n )" );
+a = {}
+a.a = n;
+p = tojson( a );
+assert.eq.automsg( "'{ \"a\" : NumberLong( \"-11111111111111111\" ) }'", "p" );
+
+// parsing
+assert.throws.automsg( function() { new NumberLong( "" ); } );
+assert.throws.automsg( function() { new NumberLong( "y" ); } );
+assert.throws.automsg( function() { new NumberLong( "11111111111111111111" ); } );
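+// Doubles hold integers exactly only up to 2^53, which is why the 17-digit
+// values above round-trip through toNumber() off by one, and why the
+// 20-digit string exceeds a 64-bit long and fails to parse.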
diff --git a/jstests/objid5.js b/jstests/objid5.js
index ab883bc..9a26839 100644
--- a/jstests/objid5.js
+++ b/jstests/objid5.js
@@ -4,3 +4,15 @@ t.drop();
t.save( { _id : 5.5 } );
assert.eq( 18 , Object.bsonsize( t.findOne() ) , "A" );
+
+x = db.runCommand( { features : 1 } )
+y = db.runCommand( { features : 1 , oidReset : 1 } )
+
+if( !x.ok )
+ print("x: " + tojson(x));
+
+assert( x.oidMachine , "B1" )
+assert.neq( x.oidMachine , y.oidMachine , "B2" )
+assert.eq( x.oidMachine , y.oidMachineOld , "B3" )
+
+assert.eq( 18 , Object.bsonsize( { _id : 7.7 } ) , "C" )
diff --git a/jstests/objid6.js b/jstests/objid6.js
new file mode 100644
index 0000000..c414ff0
--- /dev/null
+++ b/jstests/objid6.js
@@ -0,0 +1,14 @@
+o = new ObjectId();
+assert(o.getTimestamp);
+
+a = new ObjectId("4c17f616a707427266a2801a");
+b = new ObjectId("4c17f616a707428966a2801c");
+assert.eq(a.getTimestamp(), b.getTimestamp() , "A" );
+
+x = Math.floor( (new Date()).getTime() / 1000 );
+a = new ObjectId();
+z = Math.floor( (new Date()).getTime() / 1000 );
+y = a.getTimestamp().getTime() / 1000;
+
+assert( x <= y , "B" );
+assert( y <= z , "C" );
diff --git a/jstests/objid7.js b/jstests/objid7.js
new file mode 100644
index 0000000..070a9d1
--- /dev/null
+++ b/jstests/objid7.js
@@ -0,0 +1,13 @@
+
+a = new ObjectId( "4c1a478603eba73620000000" )
+b = new ObjectId( "4c1a478603eba73620000000" )
+c = new ObjectId();
+
+assert.eq( a.toString() , b.toString() , "A" )
+assert.eq( a.toString() , "4c1a478603eba73620000000" , "B" );
+
+assert( a.equals( b ) , "C" )
+
+assert.neq( a.toString() , c.toString() , "D" );
+assert( ! a.equals( c ) , "E" );
+
diff --git a/jstests/or1.js b/jstests/or1.js
new file mode 100644
index 0000000..66162c4
--- /dev/null
+++ b/jstests/or1.js
@@ -0,0 +1,57 @@
+t = db.jstests_or1;
+t.drop();
+
+checkArrs = function( a, b, m ) {
+ assert.eq( a.length, b.length, m );
+ aStr = [];
+ bStr = [];
+ a.forEach( function( x ) { aStr.push( tojson( x ) ); } );
+ b.forEach( function( x ) { bStr.push( tojson( x ) ); } );
+ for ( i in aStr ) {
+ assert( -1 != bStr.indexOf( aStr[ i ] ), m );
+ }
+}
+
+doTest = function() {
+
+t.save( {_id:0,a:1} );
+t.save( {_id:1,a:2} );
+t.save( {_id:2,b:1} );
+t.save( {_id:3,b:2} );
+t.save( {_id:4,a:1,b:1} );
+t.save( {_id:5,a:1,b:2} );
+t.save( {_id:6,a:2,b:1} );
+t.save( {_id:7,a:2,b:2} );
+
+assert.throws( function() { t.find( { $or:"a" } ).toArray(); } );
+assert.throws( function() { t.find( { $or:[] } ).toArray(); } );
+assert.throws( function() { t.find( { $or:[ "a" ] } ).toArray(); } );
+
+a1 = t.find( { $or: [ { a : 1 } ] } ).toArray();
+checkArrs( [ { _id:0, a:1 }, { _id:4, a:1, b:1 }, { _id:5, a:1, b:2 } ], a1 );
+
+a1b2 = t.find( { $or: [ { a : 1 }, { b : 2 } ] } ).toArray();
+checkArrs( [ { _id:0, a:1 }, { _id:3, b:2 }, { _id:4, a:1, b:1 }, { _id:5, a:1, b:2 }, { _id:7, a:2, b:2 } ], a1b2 );
+
+t.drop();
+t.save( {a:[0,1],b:[0,1]} );
+assert.eq( 1, t.find( { $or: [ { a: {$in:[0,1]}} ] } ).toArray().length );
+assert.eq( 1, t.find( { $or: [ { b: {$in:[0,1]}} ] } ).toArray().length );
+assert.eq( 1, t.find( { $or: [ { a: {$in:[0,1]}}, { b: {$in:[0,1]}} ] } ).toArray().length );
+
+}
+
+doTest();
+
+// not part of SERVER-1003, but a good check for subsequent implementations
+t.drop();
+t.ensureIndex( {a:1} );
+doTest();
+
+t.drop();
+t.ensureIndex( {b:1} );
+doTest();
+
+t.drop();
+t.ensureIndex( {a:1,b:1} );
+doTest();
\ No newline at end of file
diff --git a/jstests/or2.js b/jstests/or2.js
new file mode 100644
index 0000000..d90cc85
--- /dev/null
+++ b/jstests/or2.js
@@ -0,0 +1,68 @@
+t = db.jstests_or2;
+t.drop();
+
+checkArrs = function( a, b, m ) {
+ assert.eq( a.length, b.length, m );
+ aStr = [];
+ bStr = [];
+ a.forEach( function( x ) { aStr.push( tojson( x ) ); } );
+ b.forEach( function( x ) { bStr.push( tojson( x ) ); } );
+ for ( i in aStr ) {
+ assert( -1 != bStr.indexOf( aStr[ i ] ), m );
+ }
+}
+
+doTest = function( index ) {
+ if ( index == null ) {
+ index = true;
+ }
+
+ t.save( {_id:0,x:0,a:1} );
+ t.save( {_id:1,x:0,a:2} );
+ t.save( {_id:2,x:0,b:1} );
+ t.save( {_id:3,x:0,b:2} );
+ t.save( {_id:4,x:1,a:1,b:1} );
+ t.save( {_id:5,x:1,a:1,b:2} );
+ t.save( {_id:6,x:1,a:2,b:1} );
+ t.save( {_id:7,x:1,a:2,b:2} );
+
+ assert.throws( function() { t.find( { x:0,$or:"a" } ).toArray(); } );
+ assert.throws( function() { t.find( { x:0,$or:[] } ).toArray(); } );
+ assert.throws( function() { t.find( { x:0,$or:[ "a" ] } ).toArray(); } );
+ assert.throws( function() { t.find( { x:0,$or:[ {x:0,$or:[{x:0}]} ] } ).toArray(); } );
+
+ a1 = t.find( { x:0, $or: [ { a : 1 } ] } ).toArray();
+ checkArrs( [ { _id:0, x:0, a:1 } ], a1 );
+ if ( index ) {
+ assert( t.find( { x:0,$or: [ { a : 1 } ] } ).explain().cursor.match( /Btree/ ) );
+ }
+
+ a1b2 = t.find( { x:1, $or: [ { a : 1 }, { b : 2 } ] } ).toArray();
+ checkArrs( [ { _id:4, x:1, a:1, b:1 }, { _id:5, x:1, a:1, b:2 }, { _id:7, x:1, a:2, b:2 } ], a1b2 );
+ if ( index ) {
+ assert( t.find( { x:0,$or: [ { a : 1 } ] } ).explain().cursor.match( /Btree/ ) );
+ }
+
+ t.drop();
+ obj = {_id:0,x:10,a:[1,2,3]};
+ t.save( obj );
+ t.update( {x:10,$or:[ {a:2} ]}, {$set:{'a.$':100}} );
+ assert.eq( obj, t.findOne() ); // no change
+}
+
+doTest( false );
+
+t.ensureIndex( { x:1 } );
+doTest();
+
+t.drop();
+t.ensureIndex( { x:1,a:1 } );
+doTest();
+
+t.drop();
+t.ensureIndex( {x:1,b:1} );
+doTest();
+
+t.drop();
+t.ensureIndex( {x:1,a:1,b:1} );
+doTest();
diff --git a/jstests/or3.js b/jstests/or3.js
new file mode 100644
index 0000000..be85a8f
--- /dev/null
+++ b/jstests/or3.js
@@ -0,0 +1,64 @@
+t = db.jstests_or3;
+t.drop();
+
+checkArrs = function( a, b, m ) {
+ assert.eq( a.length, b.length, m );
+ aStr = [];
+ bStr = [];
+ a.forEach( function( x ) { aStr.push( tojson( x ) ); } );
+ b.forEach( function( x ) { bStr.push( tojson( x ) ); } );
+ for ( i in aStr ) {
+ assert( -1 != bStr.indexOf( aStr[ i ] ), m );
+ }
+}
+
+doTest = function( index ) {
+ if ( index == null ) {
+ index = true;
+ }
+
+ t.save( {_id:0,x:0,a:1} );
+ t.save( {_id:1,x:0,a:2} );
+ t.save( {_id:2,x:0,b:1} );
+ t.save( {_id:3,x:0,b:2} );
+ t.save( {_id:4,x:1,a:1,b:1} );
+ t.save( {_id:5,x:1,a:1,b:2} );
+ t.save( {_id:6,x:1,a:2,b:1} );
+ t.save( {_id:7,x:1,a:2,b:2} );
+
+ assert.throws( function() { t.find( { x:0,$nor:"a" } ).toArray(); } );
+ assert.throws( function() { t.find( { x:0,$nor:[] } ).toArray(); } );
+ assert.throws( function() { t.find( { x:0,$nor:[ "a" ] } ).toArray(); } );
+ assert.throws( function() { t.find( { x:0,$nor:[ {x:0,$or:[{x:0}]} ] } ).toArray(); } );
+ assert.throws( function() { t.find( { x:0,$nor:[ {x:0,$nor:[{x:0}]} ] } ).toArray(); } );
+
+ an1 = t.find( { $nor: [ { a : 1 } ] } ).toArray();
+ checkArrs( t.find( {a:{$ne:1}} ).toArray(), an1 );
+
+ an1bn2 = t.find( { x:1, $nor: [ { a : 1 }, { b : 2 } ] } ).toArray();
+ checkArrs( [ { _id:6, x:1, a:2, b:1 } ], an1bn2 );
+ checkArrs( t.find( { x:1, a:{$ne:1}, b:{$ne:2} } ).toArray(), an1bn2 );
+ if ( index ) {
+ assert( t.find( { x:1, $nor: [ { a : 1 }, { b : 2 } ] } ).explain().cursor.match( /Btree/ ) );
+ }
+
+ an1b2 = t.find( { $nor: [ { a : 1 } ], $or: [ { b : 2 } ] } ).toArray();
+ checkArrs( t.find( {a:{$ne:1},b:2} ).toArray(), an1b2 );
+}
+
+doTest( false );
+
+t.ensureIndex( { x:1 } );
+doTest();
+
+t.drop();
+t.ensureIndex( { x:1,a:1 } );
+doTest();
+
+t.drop();
+t.ensureIndex( {x:1,b:1} );
+doTest();
+
+t.drop();
+t.ensureIndex( {x:1,a:1,b:1} );
+doTest();
\ No newline at end of file
diff --git a/jstests/or4.js b/jstests/or4.js
new file mode 100644
index 0000000..af8704b
--- /dev/null
+++ b/jstests/or4.js
@@ -0,0 +1,98 @@
+t = db.jstests_or4;
+t.drop();
+
+checkArrs = function( a, b ) {
+ m = "[" + a + "] != [" + b + "]";
+ a = eval( a );
+ b = eval( b );
+ assert.eq( a.length, b.length, m );
+ aStr = [];
+ bStr = [];
+ a.forEach( function( x ) { aStr.push( tojson( x ) ); } );
+ b.forEach( function( x ) { bStr.push( tojson( x ) ); } );
+ for ( i in aStr ) {
+ assert( -1 != bStr.indexOf( aStr[ i ] ), m );
+ }
+}
+
+t.ensureIndex( {a:1} );
+t.ensureIndex( {b:1} );
+
+t.save( {a:2} );
+t.save( {b:3} );
+t.save( {b:3} );
+t.save( {a:2,b:3} );
+
+assert.eq.automsg( "4", "t.count( {$or:[{a:2},{b:3}]} )" );
+assert.eq.automsg( "2", "t.count( {$or:[{a:2},{a:2}]} )" );
+
+assert.eq.automsg( "2", "t.find( {} ).skip( 2 ).count( true )" );
+assert.eq.automsg( "2", "t.find( {$or:[{a:2},{b:3}]} ).skip( 2 ).count( true )" );
+assert.eq.automsg( "1", "t.find( {$or:[{a:2},{b:3}]} ).skip( 3 ).count( true )" );
+
+assert.eq.automsg( "2", "t.find( {} ).limit( 2 ).count( true )" );
+assert.eq.automsg( "1", "t.find( {$or:[{a:2},{b:3}]} ).limit( 1 ).count( true )" );
+assert.eq.automsg( "2", "t.find( {$or:[{a:2},{b:3}]} ).limit( 2 ).count( true )" );
+assert.eq.automsg( "3", "t.find( {$or:[{a:2},{b:3}]} ).limit( 3 ).count( true )" );
+assert.eq.automsg( "4", "t.find( {$or:[{a:2},{b:3}]} ).limit( 4 ).count( true )" );
+
+t.remove({ $or: [{ a: 2 }, { b: 3}] });
+assert.eq.automsg( "0", "t.count()" );
+
+t.save( {b:3} );
+t.remove({ $or: [{ a: 2 }, { b: 3}] });
+assert.eq.automsg( "0", "t.count()" );
+
+t.save( {a:2} );
+t.save( {b:3} );
+t.save( {a:2,b:3} );
+
+t.update( {$or:[{a:2},{b:3}]}, {$set:{z:1}}, false, true );
+assert.eq.automsg( "3", "t.count( {z:1} )" );
+
+assert.eq.automsg( "3", "t.find( {$or:[{a:2},{b:3}]} ).toArray().length" );
+checkArrs( "t.find().toArray()", "t.find( {$or:[{a:2},{b:3}]} ).toArray()" );
+assert.eq.automsg( "2", "t.find( {$or:[{a:2},{b:3}]} ).skip(1).toArray().length" );
+
+assert.eq.automsg( "3", "t.find( {$or:[{a:2},{b:3}]} ).batchSize( 2 ).toArray().length" );
+
+t.save( {a:1} );
+t.save( {b:4} );
+t.save( {a:2} );
+
+assert.eq.automsg( "4", "t.find( {$or:[{a:2},{b:3}]} ).batchSize( 2 ).toArray().length" );
+assert.eq.automsg( "4", "t.find( {$or:[{a:2},{b:3}]} ).snapshot().toArray().length" );
+
+t.save( {a:1,b:3} );
+assert.eq.automsg( "4", "t.find( {$or:[{a:2},{b:3}]} ).batchSize(-4).toArray().length" );
+
+assert.eq.automsg( "[1,2]", "t.distinct( 'a', {$or:[{a:2},{b:3}]} )" );
+
+assert.eq.automsg( "[{a:2},{a:null},{a:1}]", "t.group( {key:{a:1}, cond:{$or:[{a:2},{b:3}]}, reduce:function( x, y ) { }, initial:{} } )" );
+assert.eq.automsg( "5", "t.mapReduce( function() { emit( 'a', this.a ); }, function( key, vals ) { return vals.length; }, {query:{$or:[{a:2},{b:3}]}} ).counts.input" );
+
+explain = t.find( {$or:[{a:2},{b:3}]} ).explain();
+assert.eq.automsg( "2", "explain.clauses.length" );
+assert.eq.automsg( "5", "explain.n" );
+assert.eq.automsg( "6", "explain.nscanned" );
+
+t.remove( {} );
+
+t.save( {a:[1,2]} );
+assert.eq.automsg( "1", "t.find( {$or:[{a:1},{a:2}]} ).toArray().length" );
+assert.eq.automsg( "1", "t.count( {$or:[{a:1},{a:2}]} )" );
+assert.eq.automsg( "1", "t.find( {$or:[{a:2},{a:1}]} ).toArray().length" );
+assert.eq.automsg( "1", "t.count( {$or:[{a:2},{a:1}]} )" );
+
+t.remove();
+
+assert.eq.automsg( "'BtreeCursor b_1'", "t.find( {$or:[{a:1}]} ).sort( {b:1} ).explain().cursor" );
+assert.eq.automsg( "'BtreeCursor b_1'", "t.find( {$or:[{}]} ).sort( {b:1} ).explain().cursor" );
+assert.eq.automsg( "'BtreeCursor b_1'", "t.find( {$or:[{a:1},{a:3}]} ).sort( {b:1} ).explain().cursor" );
+assert.eq.automsg( "'BtreeCursor b_1'", "t.find( {$or:[{a:1},{b:3}]} ).sort( {b:1} ).explain().cursor" );
+assert.eq.automsg( "'BtreeCursor b_1'", "t.find( {$or:[{b:1}]} ).sort( {b:1} ).explain().cursor" );
+assert.eq.automsg( "1", "t.find( {$or:[{b:1}]} ).sort( {b:1} ).explain().indexBounds.b[ 0 ][ 0 ].$minElement" );
+
+assert.eq.automsg( "'BtreeCursor b_1'", "t.find( {$or:[{a:1}]} ).hint( {b:1} ).explain().cursor" );
+assert.eq.automsg( "'BtreeCursor b_1'", "t.find( {$or:[{}]} ).hint( {b:1} ).explain().cursor" );
+assert.eq.automsg( "1", "t.find( {$or:[{b:1}]} ).hint( {b:1} ).explain().indexBounds.b[ 0 ][ 0 ]" );
diff --git a/jstests/or5.js b/jstests/or5.js
new file mode 100644
index 0000000..baa6bd6
--- /dev/null
+++ b/jstests/or5.js
@@ -0,0 +1,107 @@
+t = db.jstests_or5;
+t.drop();
+
+t.ensureIndex( {a:1} );
+t.ensureIndex( {b:1} );
+
+assert.eq.automsg( "'BasicCursor'", "t.find( {$or:[{a:2},{b:3},{}]} ).explain().cursor" );
+assert.eq.automsg( "'BasicCursor'", "t.find( {$or:[{a:2},{b:3},{c:4}]} ).explain().cursor" );
+assert.eq.automsg( "'BasicCursor'", "t.find( {$or:[{a:2},{b:3}]} ).sort( {c:1} ).explain().cursor" );
+e = t.find( {$or:[{a:2},{b:3}]} ).sort( {a:1} ).explain();
+assert.eq.automsg( "'BtreeCursor a_1'", "e.cursor" );
+assert.eq.automsg( "1", "e.indexBounds.a[ 0 ][ 0 ].$minElement" );
+assert.eq.automsg( "1", "e.indexBounds.a[ 0 ][ 1 ].$maxElement" );
+
+t.ensureIndex( {c:1} );
+
+t.save( {a:2} );
+t.save( {b:3} );
+t.save( {c:4} );
+t.save( {a:2,b:3} );
+t.save( {a:2,c:4} );
+t.save( {b:3,c:4} );
+t.save( {a:2,b:3,c:4} );
+
+assert.eq.automsg( "7", "t.count( {$or:[{a:2},{b:3},{c:4}]} )" );
+assert.eq.automsg( "6", "t.count( {$or:[{a:6},{b:3},{c:4}]} )" );
+assert.eq.automsg( "6", "t.count( {$or:[{a:2},{b:6},{c:4}]} )" );
+assert.eq.automsg( "6", "t.count( {$or:[{a:2},{b:3},{c:6}]} )" );
+
+assert.eq.automsg( "7", "t.find( {$or:[{a:2},{b:3},{c:4}]} ).toArray().length" );
+assert.eq.automsg( "6", "t.find( {$or:[{a:6},{b:3},{c:4}]} ).toArray().length" );
+assert.eq.automsg( "6", "t.find( {$or:[{a:2},{b:6},{c:4}]} ).toArray().length" );
+assert.eq.automsg( "6", "t.find( {$or:[{a:2},{b:3},{c:6}]} ).toArray().length" );
+
+for( i = 2; i <= 7; ++i ) {
+assert.eq.automsg( "7", "t.find( {$or:[{a:2},{b:3},{c:4}]} ).batchSize( i ).toArray().length" );
+assert.eq.automsg( "6", "t.find( {$or:[{a:6},{b:3},{c:4}]} ).batchSize( i ).toArray().length" );
+assert.eq.automsg( "6", "t.find( {$or:[{a:2},{b:6},{c:4}]} ).batchSize( i ).toArray().length" );
+assert.eq.automsg( "6", "t.find( {$or:[{a:2},{b:3},{c:6}]} ).batchSize( i ).toArray().length" );
+}
+
+t.ensureIndex( {z:"2d"} );
+
+assert.eq.automsg( "'GeoSearchCursor'", "t.find( {z:{$near:[50,50]},a:2} ).explain().cursor" );
+assert.eq.automsg( "'GeoSearchCursor'", "t.find( {z:{$near:[50,50]},$or:[{a:2}]} ).explain().cursor" );
+assert.eq.automsg( "'GeoSearchCursor'", "t.find( {$or:[{a:2}],z:{$near:[50,50]}} ).explain().cursor" );
+assert.eq.automsg( "'GeoSearchCursor'", "t.find( {$or:[{a:2},{b:3}],z:{$near:[50,50]}} ).explain().cursor" );
+assert.throws.automsg( function() { return t.find( {$or:[{z:{$near:[50,50]}},{a:2}]} ).toArray(); } );
+
+function reset() {
+ t.drop();
+
+ t.ensureIndex( {a:1} );
+ t.ensureIndex( {b:1} );
+ t.ensureIndex( {c:1} );
+
+ t.save( {a:2} );
+ t.save( {a:2} );
+ t.save( {b:3} );
+ t.save( {b:3} );
+ t.save( {c:4} );
+ t.save( {c:4} );
+}
+
+reset();
+
+assert.eq.automsg( "6", "t.find( {$or:[{a:2},{b:3},{c:4}]} ).batchSize( 1 ).itcount()" );
+assert.eq.automsg( "6", "t.find( {$or:[{a:2},{b:3},{c:4}]} ).batchSize( 2 ).itcount()" );
+
+c = t.find( {$or:[{a:2},{b:3},{c:4}]} ).batchSize( 2 );
+c.next();
+t.remove( {b:3} );
+assert.eq.automsg( "3", c.itcount() );
+
+reset();
+
+c = t.find( {$or:[{a:2},{b:3},{c:4}]} ).batchSize( 2 );
+c.next();
+c.next();
+t.remove( {b:3} );
+assert.eq.automsg( "2", c.itcount() );
+
+reset();
+
+c = t.find( {$or:[{a:2},{b:3},{c:4}]} ).batchSize( 2 );
+c.next();
+c.next();
+c.next();
+t.remove( {b:3} );
+assert.eq.automsg( "3", c.itcount() );
+
+reset();
+
+c = t.find( {$or:[{a:2},{b:3},{c:4}]} ).batchSize( 2 );
+c.next();
+c.next();
+c.next();
+c.next();
+t.remove( {b:3} );
+assert.eq.automsg( "2", c.itcount() );
+
+t.drop();
+
+t.save( {a:[1,2]} );
+assert.eq.automsg( "1", "t.find( {$or:[{a:[1,2]}]} ).itcount()" );
+assert.eq.automsg( "1", "t.find( {$or:[{a:{$all:[1,2]}}]} ).itcount()" );
+assert.eq.automsg( "0", "t.find( {$or:[{a:{$all:[1,3]}}]} ).itcount()" );
diff --git a/jstests/or6.js b/jstests/or6.js
new file mode 100644
index 0000000..3800c78
--- /dev/null
+++ b/jstests/or6.js
@@ -0,0 +1,31 @@
+t = db.jstests_or6;
+t.drop();
+
+t.ensureIndex( {a:1} );
+
+assert.eq.automsg( "2", "t.find( {$or:[{a:{$gt:2}},{a:{$gt:0}}]} ).explain().clauses[ 1 ].indexBounds.a[ 0 ][ 1 ]" );
+assert.eq.automsg( "2", "t.find( {$or:[{a:{$lt:2}},{a:{$lt:4}}]} ).explain().clauses[ 1 ].indexBounds.a[ 0 ][ 0 ]" );
+
+assert.eq.automsg( "2", "t.find( {$or:[{a:{$gt:2,$lt:10}},{a:{$gt:0,$lt:5}}]} ).explain().clauses[ 1 ].indexBounds.a[ 0 ][ 1 ]" );
+assert.eq.automsg( "0", "t.find( {$or:[{a:{$gt:2,$lt:10}},{a:{$gt:0,$lt:15}}]} ).explain().clauses[ 1 ].indexBounds.a[ 0 ][ 0 ]" );
+assert.eq.automsg( "15", "t.find( {$or:[{a:{$gt:2,$lt:10}},{a:{$gt:0,$lt:15}}]} ).explain().clauses[ 1 ].indexBounds.a[ 0 ][ 1 ]" );
+
+// no separate clauses
+assert.eq.automsg( "null", "t.find( {$or:[{a:{$gt:2,$lt:10}},{a:{$gt:3,$lt:5}}]} ).explain().clauses" );
+
+assert.eq.automsg( "20", "t.find( {$or:[{a:{$gt:2,$lt:10}},{a:{$gt:3,$lt:5}},{a:{$gt:20}}]} ).explain().clauses[ 1 ].indexBounds.a[ 0 ][ 0 ]" );
+
+assert.eq.automsg( "null", "t.find( {$or:[{a:1},{b:2}]} ).hint( {a:1} ).explain().clauses" );
+assert.eq.automsg( "2", "t.find( {$or:[{a:1},{a:3}]} ).hint( {a:1} ).explain().clauses.length" );
+assert.eq.automsg( "'BasicCursor'", "t.find( {$or:[{a:1},{a:3}]} ).hint( {$natural:1} ).explain().cursor" );
+
+t.ensureIndex( {b:1} );
+assert.eq.automsg( "2", "t.find( {$or:[{a:1,b:5},{a:3,b:5}]} ).hint( {a:1} ).explain().clauses.length" );
+
+t.drop();
+
+t.ensureIndex( {a:1,b:1} );
+assert.eq.automsg( "2", "t.find( {$or:[{a:{$in:[1,2]},b:5}, {a:2,b:6}]} ).explain().clauses.length" );
+assert.eq.automsg( "2", "t.find( {$or:[{a:{$gt:1,$lte:2},b:5}, {a:2,b:6}]} ).explain().clauses.length" );
+assert.eq.automsg( "2", "t.find( {$or:[{a:{$gt:1,$lte:3},b:5}, {a:2,b:6}]} ).explain().clauses.length" );
+assert.eq.automsg( "null", "t.find( {$or:[{a:{$in:[1,2]}}, {a:2}]} ).explain().clauses" ); \ No newline at end of file
diff --git a/jstests/or7.js b/jstests/or7.js
new file mode 100644
index 0000000..71538de
--- /dev/null
+++ b/jstests/or7.js
@@ -0,0 +1,41 @@
+t = db.jstests_or7;
+t.drop();
+
+t.ensureIndex( {a:1} );
+t.save( {a:2} );
+
+assert.eq.automsg( "1", "t.count( {$or:[{a:{$in:[1,3]}},{a:2}]} )" );
+
+//SERVER-1201 ...
+
+t.remove();
+
+t.save( {a:"aa"} );
+t.save( {a:"ab"} );
+t.save( {a:"ad"} );
+
+assert.eq.automsg( "3", "t.count( {$or:[{a:/^ab/},{a:/^a/}]} )" );
+
+t.remove();
+
+t.save( {a:"aa"} );
+t.save( {a:"ad"} );
+
+assert.eq.automsg( "2", "t.count( {$or:[{a:/^ab/},{a:/^a/}]} )" );
+
+t.remove();
+
+t.save( {a:"aa"} );
+t.save( {a:"ac"} );
+
+assert.eq.automsg( "2", "t.count( {$or:[{a:/^ab/},{a:/^a/}]} )" );
+
+assert.eq.automsg( "2", "t.count( {$or:[{a:/^ab/},{a:/^a/}]} )" );
+
+t.save( {a:"ab"} );
+assert.eq.automsg( "3", "t.count( {$or:[{a:{$in:[/^ab/],$gte:'abc'}},{a:/^a/}]} )" );
+
+t.remove();
+t.save( {a:"a"} );
+t.save( {a:"b"} );
+assert.eq.automsg( "2", "t.count( {$or:[{a:{$gt:'a',$lt:'b'}},{a:{$gte:'a',$lte:'b'}}]} )" );
diff --git a/jstests/or8.js b/jstests/or8.js
new file mode 100644
index 0000000..7a5c709
--- /dev/null
+++ b/jstests/or8.js
@@ -0,0 +1,16 @@
+// missing collection
+
+t = db.jstests_or8;
+t.drop();
+
+t.find({ "$or": [ { "PropA": { "$lt": "b" } }, { "PropA": { "$lt": "b", "$gt": "a" } } ] }).toArray();
+
+// empty $in
+
+t.save( {a:1} );
+t.save( {a:3} );
+t.ensureIndex( {a:1} );
+t.find({ $or: [ { a: {$in:[]} } ] } ).toArray();
+assert.eq.automsg( "2", "t.find({ $or: [ { a: {$in:[]} }, {a:1}, {a:3} ] } ).toArray().length" );
+assert.eq.automsg( "2", "t.find({ $or: [ {a:1}, { a: {$in:[]} }, {a:3} ] } ).toArray().length" );
+assert.eq.automsg( "2", "t.find({ $or: [ {a:1}, {a:3}, { a: {$in:[]} } ] } ).toArray().length" );
diff --git a/jstests/or9.js b/jstests/or9.js
new file mode 100644
index 0000000..0df2153
--- /dev/null
+++ b/jstests/or9.js
@@ -0,0 +1,54 @@
+// index skipping and previous index range negation
+
+t = db.jstests_or9;
+t.drop();
+
+t.ensureIndex( {a:1,b:1} );
+
+t.save( {a:2,b:2} );
+
+function check( a, b, q ) {
+ count = a;
+ clauses = b;
+ query = q;
+ assert.eq.automsg( "count", "t.count( query )" );
+ if ( clauses == 1 ) {
+ assert.eq.automsg( "undefined", "t.find( query ).explain().clauses" );
+ } else {
+ assert.eq.automsg( "clauses", "t.find( query ).explain().clauses.length" );
+ }
+}
+
+check( 1, 1, { $or: [ { a: { $gte:1,$lte:3 } }, { a: 2 } ] } );
+check( 1, 2, { $or: [ { a: { $gt:2,$lte:3 } }, { a: 2 } ] } );
+
+check( 1, 1, { $or: [ { b: { $gte:1,$lte:3 } }, { b: 2 } ] } );
+check( 1, 1, { $or: [ { b: { $gte:2,$lte:3 } }, { b: 2 } ] } );
+check( 1, 1, { $or: [ { b: { $gt:2,$lte:3 } }, { b: 2 } ] } );
+
+check( 1, 1, { $or: [ { a: { $gte:1,$lte:3 } }, { a: 2, b: 2 } ] } );
+check( 1, 2, { $or: [ { a: { $gte:1,$lte:3 }, b:3 }, { a: 2 } ] } );
+
+check( 1, 1, { $or: [ { b: { $gte:1,$lte:3 } }, { b: 2, a: 2 } ] } );
+check( 1, 2, { $or: [ { b: { $gte:1,$lte:3 }, a:3 }, { b: 2 } ] } );
+
+check( 1, 2, { $or: [ { a: { $gte:1,$lte:3 }, b: 3 }, { a: 2, b: 2 } ] } );
+check( 1, 2, { $or: [ { a: { $gte:2,$lte:3 }, b: 3 }, { a: 2, b: 2 } ] } );
+check( 1, 1, { $or: [ { a: { $gte:1,$lte:3 }, b: 2 }, { a: 2, b: 2 } ] } );
+
+check( 1, 2, { $or: [ { b: { $gte:1,$lte:3 }, a: 3 }, { a: 2, b: 2 } ] } );
+check( 1, 2, { $or: [ { b: { $gte:2,$lte:3 }, a: 3 }, { a: 2, b: 2 } ] } );
+check( 1, 1, { $or: [ { b: { $gte:1,$lte:3 }, a: 2 }, { a: 2, b: 2 } ] } );
+
+
+
+t.remove();
+
+t.save( {a:1,b:5} );
+t.save( {a:5,b:1} );
+
+check( 2, 1, { $or: [ { a: { $in:[1,5] }, b: { $in:[1,5] } }, { a: { $in:[1,5] }, b: { $in:[1,5] } } ] } );
+check( 2, 2, { $or: [ { a: { $in:[1] }, b: { $in:[1,5] } }, { a: { $in:[1,5] }, b: { $in:[1,5] } } ] } );
+check( 2, 2, { $or: [ { a: { $in:[1] }, b: { $in:[1] } }, { a: { $in:[1,5] }, b: { $in:[1,5] } } ] } );
+
+assert.eq.automsg( {a:[[1,1],[5,5]],b:[[1,1],[5,5]]}, "t.find( { $or: [ { a: { $in:[1] }, b: { $in:[1] } }, { a: { $in:[1,5] }, b: { $in:[1,5] } } ] } ).explain().clauses[ 1 ].indexBounds" );
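+// When a later $or clause's index range is wholly contained in an earlier
+// clause's range, the optimizer drops it, which is why several of the
+// checks above expect a single clause.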
diff --git a/jstests/ora.js b/jstests/ora.js
new file mode 100644
index 0000000..67af4c1
--- /dev/null
+++ b/jstests/ora.js
@@ -0,0 +1,17 @@
+var t = db.jstests_ora;
+
+// $where
+t.drop();
+for (var i = 0; i < 10; i += 1) {
+ t.save({x: i, y: 10 - i});
+}
+assert.eq.automsg("1", "t.find({$or: [{$where: 'this.x === 2'}]}).count()");
+assert.eq.automsg("2", "t.find({$or: [{$where: 'this.x === 2'}, {$where: 'this.y === 2'}]}).count()");
+assert.eq.automsg("1", "t.find({$or: [{$where: 'this.x === 2'}, {$where: 'this.y === 8'}]}).count()");
+assert.eq.automsg("10", "t.find({$or: [{$where: 'this.x === 2'}, {x: {$ne: 2}}]}).count()");
+
+// geo
+t.drop();
+t.ensureIndex({loc: "2d"});
+
+assert.throws(function () {t.find({$or: [{loc: {$near: [11, 11]}}]}).limit(1).next()['_id'];});
diff --git a/jstests/orb.js b/jstests/orb.js
new file mode 100644
index 0000000..a4abdee
--- /dev/null
+++ b/jstests/orb.js
@@ -0,0 +1,17 @@
+// check negative-direction index and negation
+
+var t = db.jstests_orb;
+t.drop();
+
+t.save( {a:1} );
+t.ensureIndex( {a:-1} );
+
+assert.eq.automsg( "1", "t.count( {$or: [ { a: { $gt:0,$lt:2 } }, { a: { $gt:-1,$lt:3 } } ] } )" );
+
+t.drop();
+
+t.save( {a:1,b:1} );
+t.ensureIndex( {a:1,b:-1} );
+
+assert.eq.automsg( "1", "t.count( {$or: [ { a: { $gt:0,$lt:2 } }, { a: { $gt:-1,$lt:3 } } ] } )" );
+assert.eq.automsg( "1", "t.count( {$or: [ { a:1, b: { $gt:0,$lt:2 } }, { a:1, b: { $gt:-1,$lt:3 } } ] } )" ); \ No newline at end of file
diff --git a/jstests/pullall2.js b/jstests/pullall2.js
new file mode 100644
index 0000000..61369ba
--- /dev/null
+++ b/jstests/pullall2.js
@@ -0,0 +1,20 @@
+
+t = db.pullall2
+t.drop()
+
+o = { _id : 1 , a : [] }
+for ( i=0; i<5; i++ )
+ o.a.push( { x : i , y : i } )
+
+t.insert( o )
+
+assert.eq( o , t.findOne() , "A" );
+
+t.update( {} , { $pull : { a : { x : 3 } } } )
+o.a = o.a.filter( function(z){ return z.x != 3 } )
+assert.eq( o , t.findOne() , "B" );
+
+t.update( {} , { $pull : { a : { x : { $in : [ 1 , 4 ] } } } } );
+o.a = o.a.filter( function(z){ return z.x != 1 } )
+o.a = o.a.filter( function(z){ return z.x != 4 } )
+assert.eq( o , t.findOne() , "C" );
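+// $pull with a document argument removes array elements matching it as a
+// query, so the $in form strips the x:1 and x:4 elements in a single update.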
diff --git a/jstests/ref3.js b/jstests/ref3.js
index 77d6038..14037ee 100644
--- a/jstests/ref3.js
+++ b/jstests/ref3.js
@@ -1,19 +1,19 @@
// to run:
-// ./mongo jstests/ref.js
+// ./mongo jstests/ref3.js
-db.otherthings.drop();
-db.things.drop();
+db.otherthings3.drop();
+db.things3.drop();
var other = { s : "other thing", n : 1};
-db.otherthings.save(other);
+db.otherthings3.save(other);
-db.things.save( { name : "abc" } );
-x = db.things.findOne();
-x.o = new DBRef( "otherthings" , other._id );
-db.things.save(x);
+db.things3.save( { name : "abc" } );
+x = db.things3.findOne();
+x.o = new DBRef( "otherthings3" , other._id );
+db.things3.save(x);
-assert( db.things.findOne().o.fetch().n == 1, "dbref broken 2" );
+assert( db.things3.findOne().o.fetch().n == 1, "dbref broken 2" );
other.n++;
-db.otherthings.save(other);
-assert( db.things.findOne().o.fetch().n == 2, "dbrefs broken" );
+db.otherthings3.save(other);
+assert( db.things3.findOne().o.fetch().n == 2, "dbrefs broken" );
diff --git a/jstests/regex5.js b/jstests/regex5.js
index 418752b..1a9a9e3 100644
--- a/jstests/regex5.js
+++ b/jstests/regex5.js
@@ -31,17 +31,17 @@ print( "now indexed" );
doit();
// check bound unions SERVER-322
-assert.eq( [
- [ {x:1},{x:1} ],
- [ {x:2.5},{x:2.5} ],
- [ {x:"a"},{x:"a"} ],
- [ {x:"b"},{x:"e"} ],
- [ {x:/^b/},{x:/^b/} ],
- [ {x:/^c/},{x:/^c/} ],
- [ {x:/^d/},{x:/^d/} ]
- ],
+assert.eq( {
+ x:[[1,1],
+ [2.5,2.5],
+ ["a","a"],
+ ["b","e"],
+ [/^b/,/^b/],
+ [/^c/,/^c/],
+ [/^d/,/^d/]]
+ },
t.find( { x : { $in: [ 1, 2.5, "a", "b", /^b/, /^c/, /^d/ ] } } ).explain().indexBounds );
// SERVER-505
-assert.eq( [ [ {x:"a"}, {x:"a"} ] ], t.find( { x : { $all: [ "a", /^a/ ] } } ).explain().indexBounds );
-assert.eq( [ [ {x:"a"}, {x:"b"} ] ], t.find( { x : { $all: [ /^a/ ] } } ).explain().indexBounds );
+assert.eq( {x:[["a","a"]]}, t.find( { x : { $all: [ "a", /^a/ ] } } ).explain().indexBounds );
+assert.eq( {x:[["a","b"]]}, t.find( { x : { $all: [ /^a/ ] } } ).explain().indexBounds );
diff --git a/jstests/repair.js b/jstests/repair.js
index 5548c2b..338fe52 100644
--- a/jstests/repair.js
+++ b/jstests/repair.js
@@ -1,6 +1,6 @@
t = db.jstests_repair;
t.drop();
t.save( { i:1 } );
-db.repairDatabase();
+assert.commandWorked( db.repairDatabase() );
v = t.validate();
assert( v.valid , "not valid! " + tojson( v ) );
diff --git a/jstests/repl/basic1.js b/jstests/repl/basic1.js
index 0af26ac..701d71e 100644
--- a/jstests/repl/basic1.js
+++ b/jstests/repl/basic1.js
@@ -9,25 +9,6 @@ s = rt.start( false );
function block(){
am.runCommand( { getlasterror : 1 , w : 2 , wtimeout : 3000 } )
- sleep(3000); // 1.4 branch doesn't support w
-}
-
-function hash( db ){
- var s = "";
- var a = db.getCollectionNames();
- a = a.sort();
- a.forEach(
- function(cn){
- var c = db.getCollection( cn );
- s += cn + "\t" + c.find().count() + "\n";
- c.find().sort( { _id : 1 } ).forEach(
- function(o){
- s += tojson( o , "" , true ) + "\n";
- }
- );
- }
- );
- return s;
}
am = m.getDB( "foo" );
@@ -37,13 +18,13 @@ function check( note ){
var start = new Date();
var x,y;
while ( (new Date()).getTime() - start.getTime() < 30000 ){
- x = hash( am );
- y = hash( as );
- if ( x == y )
+ x = am.runCommand( "dbhash" );
+ y = as.runCommand( "dbhash" );
+ if ( x.md5 == y.md5 )
return;
sleep( 200 );
}
- assert.eq( x , y , note );
+ assert.eq( x.md5 , y.md5 , note );
}
am.a.save( { x : 1 } );
@@ -121,12 +102,12 @@ t.update( { "b" : 3} , { $set : { "b.$" : 17 } } )
block();
check( "after pos 4 " );
+
printjson( am.rpos.findOne() )
printjson( as.rpos.findOne() )
//am.getSisterDB( "local" ).getCollection( "oplog.$main" ).find().limit(10).sort( { $natural : -1 } ).forEach( printjson )
-
t = am.b;
t.update( { "_id" : "fun"}, { $inc : {"a.b.c.x" : 6743} } , true, false)
block()
@@ -145,6 +126,30 @@ assert.eq( { _id : "fun" , a : { b : { c : { x : 6848 , y : 911 } } } } , as.b.f
check( "b 4" );
+// lots of indexes
+
+am.lotOfIndexes.insert( { x : 1 } )
+for ( i=0; i<200; i++ ){
+ var idx = {}
+ idx["x"+i] = 1;
+ am.lotOfIndexes.ensureIndex( idx );
+ am.getLastError()
+}
+
+
+assert.soon( function(){ return am.lotOfIndexes.getIndexes().length == as.lotOfIndexes.getIndexes().length; } , "lots of indexes a" )
+
+assert.eq( am.lotOfIndexes.getIndexes().length , as.lotOfIndexes.getIndexes().length , "lots of indexes b" )
+
+// multi-update with $inc
+
+am.mu1.update( { _id : 1 , $atomic : 1 } , { $inc : { x : 1 } } , true , true )
+x = { _id : 1 , x : 1 }
+assert.eq( x , am.mu1.findOne() , "mu1" );
+assert.soon( function(){ z = as.mu1.findOne(); printjson( z ); return friendlyEqual( x , z ); } , "mu2" )
+
+
+
rt.stop();
diff --git a/jstests/repl/block1.js b/jstests/repl/block1.js
new file mode 100644
index 0000000..e358ba3
--- /dev/null
+++ b/jstests/repl/block1.js
@@ -0,0 +1,24 @@
+
+var rt = new ReplTest( "block1" );
+
+m = rt.start( true );
+s = rt.start( false );
+
+dbm = m.getDB( "foo" );
+dbs = s.getDB( "foo" );
+
+tm = dbm.bar;
+ts = dbs.bar;
+
+for ( var i=0; i<1000; i++ ){
+ tm.insert( { _id : i } );
+ dbm.runCommand( { getlasterror : 1 , w : 2 } )
+ assert.eq( i + 1 , ts.count() , "A" + i );
+ assert.eq( i + 1 , tm.count() , "B" + i );
+}
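+// getlasterror with w : 2 blocks until the write has replicated to the
+// slave, so the slave's count can be asserted right after each insert.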
+
+rt.stop();
+
+
+
+
diff --git a/jstests/repl/block2.js b/jstests/repl/block2.js
new file mode 100644
index 0000000..0e34758
--- /dev/null
+++ b/jstests/repl/block2.js
@@ -0,0 +1,45 @@
+
+var rt = new ReplTest( "block1" );
+
+m = rt.start( true );
+s = rt.start( false );
+
+function setup(){
+
+ dbm = m.getDB( "foo" );
+ dbs = s.getDB( "foo" );
+
+ tm = dbm.bar;
+ ts = dbs.bar;
+}
+setup();
+
+function check( msg ){
+ assert.eq( tm.count() , ts.count() , "check: " + msg );
+}
+
+check( "A" );
+
+tm.save( { x : 1 } );
+dbm.getLastError( 2 );
+check( "B" );
+
+tm.save( { x : 2 } );
+dbm.getLastError( 2 , 500 );
+check( "C" );
+
+rt.stop( false );
+tm.save( { x : 3 } )
+assert.eq( 3 , tm.count() , "D1" );
+assert.throws( function(){ dbm.getLastError( 2 , 500 ); } , "D2" )
+
+s = rt.start( false )
+setup();
+dbm.getLastError( 2 , 30000 )
+check( "D3" )
+
+rt.stop();
+
+
+
+
diff --git a/jstests/repl/mastermaster1.js b/jstests/repl/mastermaster1.js
new file mode 100644
index 0000000..d0fcec3
--- /dev/null
+++ b/jstests/repl/mastermaster1.js
@@ -0,0 +1,34 @@
+// basic testing of master/master
+
+
+ports = allocatePorts( 2 )
+
+left = startMongodTest( ports[0] , "mastermaster1left" , false , { master : "" , slave : "" , source : "127.0.0.1:" + ports[1] } )
+right = startMongodTest( ports[1] , "mastermaster1right" , false , { master : "" , slave : "" , source : "127.0.0.1:" + ports[0] } )
+
+x = left.getDB( "admin" ).runCommand( "ismaster" )
+assert( x.ismaster , "left: " + tojson( x ) )
+
+x = right.getDB( "admin" ).runCommand( "ismaster" )
+assert( x.ismaster , "right: " + tojson( x ) )
+
+ldb = left.getDB( "test" )
+rdb = right.getDB( "test" )
+
+ldb.foo.insert( { _id : 1 , x : "eliot" } )
+ldb.runCommand( { getlasterror : 1 , w : 2 } )
+rdb.foo.insert( { _id : 2 , x : "sara" } )
+rdb.runCommand( { getlasterror : 1 , w : 2 } )
+
+assert.eq( 2 , ldb.foo.count() , "B1" )
+assert.eq( 2 , rdb.foo.count() , "B2" )
+
+
+
+for ( var i=0; i<ports.length; i++ ){
+ stopMongod( ports[i] );
+}
+
+
+
+
diff --git a/jstests/repl/repair.js b/jstests/repl/repair.js
new file mode 100644
index 0000000..9bdaef3
--- /dev/null
+++ b/jstests/repl/repair.js
@@ -0,0 +1,14 @@
+// Test repair on master
+
+var baseName = "jstests_repl_repair";
+
+rt = new ReplTest( baseName );
+
+m = rt.start( true );
+
+m.getDB( baseName )[ baseName ].save( {} );
+var c = m.getDB( 'local' ).oplog.$main.count();
+assert.automsg( "c > 0" );
+
+assert.commandWorked( m.getDB( "local" ).repairDatabase() );
+assert.automsg( "c <= m.getDB( 'local' ).oplog.$main.count()" );
diff --git a/jstests/repl/replacePeer2.js b/jstests/repl/replacePeer2.js
index f519b17..c2983dc 100644
--- a/jstests/repl/replacePeer2.js
+++ b/jstests/repl/replacePeer2.js
@@ -44,8 +44,16 @@ doTest = function( signal ) {
checkWrite( rp.master(), rp.slave() );
// allow slave to finish initial sync
- assert.soon( function() { return 1 == rp.slave().getDB( "admin" ).runCommand( {replacepeer:1} ).ok; } );
-
+ assert.soon(
+ function() {
+ var res = rp.slave().getDB( "admin" ).runCommand( {replacepeer:1} );
+ if ( res.ok == 1 )
+ return true;
+ printjson( res );
+ return false;
+ }
+ );
+
// Should not be saved to slave.
writeOne( rp.master() );
// Make sure there would be enough time to save to l if we hadn't called replacepeer.
@@ -81,3 +89,5 @@ doTest = function( signal ) {
doTest( 15 ); // SIGTERM
doTest( 9 ); // SIGKILL
+
+print("replace2Peer finishes");
diff --git a/jstests/repl/snapshot2.js b/jstests/repl/snapshot2.js
index 4ebd786..d65cad7 100644
--- a/jstests/repl/snapshot2.js
+++ b/jstests/repl/snapshot2.js
@@ -13,12 +13,13 @@ rp = new ReplPair( l, r, a );
rp.start();
rp.waitForSteadyState();
-big = new Array( 2000 ).toString();
+big = new Array( 2000 ).toString(); // overflow oplog, so test can't pass spuriously
rp.slave().setSlaveOk();
-for( i = 0; i < 1000; ++i ) {
+for( i = 0; i < 500; ++i ) {
rp.master().getDB( baseName )[ baseName ].save( { _id: new ObjectId(), i: i, b: big } );
if ( i % 250 == 249 ) {
assert.soon( function() { return i+1 == rp.slave().getDB( baseName )[ baseName ].count(); } );
+ sleep( 10 ); // give master a chance to grab a sync point - have such small oplogs the master log might overflow otherwise
}
}
@@ -40,11 +41,11 @@ rp.left_.extraArgs_ = [ "--fastsync" ];
rp.start( true );
rp.waitForSteadyState();
-assert.eq( 1000, rp.master().getDB( baseName )[ baseName ].count() );
+assert.eq( 500, rp.master().getDB( baseName )[ baseName ].count() );
rp.slave().setSlaveOk();
-assert.eq( 1000, rp.slave().getDB( baseName )[ baseName ].count() );
-rp.master().getDB( baseName )[ baseName ].save( {i:1000} );
-assert.soon( function() { return 1001 == rp.slave().getDB( baseName )[ baseName ].count(); } );
+assert.eq( 500, rp.slave().getDB( baseName )[ baseName ].count() );
+rp.master().getDB( baseName )[ baseName ].save( {i:500} );
+assert.soon( function() { return 501 == rp.slave().getDB( baseName )[ baseName ].count(); } );
assert( !rawMongoProgramOutput().match( /resync/ ) );
assert( !rawMongoProgramOutput().match( /SyncException/ ) ); \ No newline at end of file
diff --git a/jstests/repl/snapshot3.js b/jstests/repl/snapshot3.js
index 296ebd0..d8d268d 100644
--- a/jstests/repl/snapshot3.js
+++ b/jstests/repl/snapshot3.js
@@ -13,12 +13,13 @@ rp = new ReplPair( l, r, a );
rp.start();
rp.waitForSteadyState();
-big = new Array( 2000 ).toString();
+big = new Array( 2000 ).toString(); // overflow oplog, so test can't pass spuriously
rp.slave().setSlaveOk();
-for( i = 0; i < 1000; ++i ) {
+for( i = 0; i < 500; ++i ) {
rp.master().getDB( baseName )[ baseName ].save( { _id: new ObjectId(), i: i, b: big } );
if ( i % 250 == 249 ) {
assert.soon( function() { return i+1 == rp.slave().getDB( baseName )[ baseName ].count(); } );
+ sleep( 10 ); // give master a chance to grab a sync point - have such small oplogs the master log might overflow otherwise
}
}
@@ -40,11 +41,13 @@ rp.left_.extraArgs_ = [ "--fastsync" ];
rp.start( true );
rp.waitForSteadyState();
-assert.eq( 1000, rp.master().getDB( baseName )[ baseName ].count() );
+assert.eq( 500, rp.master().getDB( baseName )[ baseName ].count() );
rp.slave().setSlaveOk();
-assert.eq( 1000, rp.slave().getDB( baseName )[ baseName ].count() );
-rp.master().getDB( baseName )[ baseName ].save( {i:1000} );
-assert.soon( function() { return 1001 == rp.slave().getDB( baseName )[ baseName ].count(); } );
+assert.eq( 500, rp.slave().getDB( baseName )[ baseName ].count() );
+rp.master().getDB( baseName )[ baseName ].save( {i:500} );
+assert.soon( function() { return 501 == rp.slave().getDB( baseName )[ baseName ].count(); } );
assert( !rawMongoProgramOutput().match( /resync/ ) );
-assert( !rawMongoProgramOutput().match( /SyncException/ ) ); \ No newline at end of file
+assert( !rawMongoProgramOutput().match( /SyncException/ ) );
+
+print("snapshot3.js finishes");
diff --git a/jstests/replsets/replset1.js b/jstests/replsets/replset1.js
new file mode 100644
index 0000000..6a18dff
--- /dev/null
+++ b/jstests/replsets/replset1.js
@@ -0,0 +1,115 @@
+doTest = function( signal ) {
+
+ // Test basic replica set functionality.
+ // -- Replication
+ // -- Failover
+
+ // Replica set testing API
+ // Create a new replica set test. Specify set name and the number of nodes you want.
+ var replTest = new ReplSetTest( {name: 'testSet', nodes: 3} );
+
+ // call startSet() to start each mongod in the replica set
+ // this returns a list of nodes
+ var nodes = replTest.startSet();
+
+ // Call initiate() to send the replSetInitiate command
+ // This will wait for initiation
+ replTest.initiate();
+
+ // Call getMaster to return a reference to the node that's been
+ // elected master.
+ var master = replTest.getMaster();
+
+ // Calling getMaster also makes available the liveNodes structure,
+ // which looks like this:
+ // liveNodes = {master: masterNode,
+ // slaves: [slave1, slave2]
+ // }
+ printjson(replTest.liveNodes);
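+
+ // For example (illustrative sketch, not part of the upstream test), the
+ // slave connections in liveNodes can be used directly:
+ replTest.liveNodes.slaves.forEach( function(s){ print( "live slave: " + s ); } );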
+
+ // Here's how you save something to master
+ master.getDB("foo").foo.save({a: 1000});
+
+ // This method will check the oplogs of the master
+ // and slaves in the set and wait until the change has replicated.
+ replTest.awaitReplication();
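+
+ // A hand-rolled equivalent (sketch, assuming setSlaveOk on each slave)
+ // would poll the slaves until the document shows up:
+ // assert.soon( function(){
+ //     return replTest.liveNodes.slaves.every( function(s){ s.setSlaveOk(); return s.getDB("foo").foo.count() == 1; } );
+ // } );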
+
+
+ cppconn = new Mongo( replTest.getURL() ).getDB( "foo" );
+ assert.eq( 1000 , cppconn.foo.findOne().a , "cppconn 1" );
+
+ {
+ // check c++ finding other servers
+ var temp = replTest.getURL();
+ temp = temp.substring( 0 , temp.lastIndexOf( "," ) );
+ temp = new Mongo( temp ).getDB( "foo" );
+ assert.eq( 1000 , temp.foo.findOne().a , "cppconn 1" );
+ }
+
+
+ // Here's how to stop the master node
+ var master_id = replTest.getNodeId( master );
+ replTest.stop( master_id );
+
+ // Now let's see who the new master is:
+ var new_master = replTest.getMaster();
+
+ // Is the new master the same as the old master?
+ var new_master_id = replTest.getNodeId( new_master );
+
+ assert( master_id != new_master_id, "Old master shouldn't be equal to new master." );
+
+ {
+ // this may fail since it has to reconnect
+ try {
+ cppconn.foo.findOne()
+ }
+ catch ( e ){
+ }
+ assert.eq( 1000 , cppconn.foo.findOne().a , "cppconn 2" );
+
+ }
+
+ // Now let's write some documents to the new master
+ for(var i=0; i<1000; i++) {
+ new_master.getDB("bar").bar.save({a: i});
+ }
+ new_master.getDB("admin").runCommand({getlasterror: 1});
+
+ // Here's how to restart the old master node:
+ slave = replTest.restart( master_id );
+
+
+ // Now, let's make sure that the old master comes up as a slave
+ assert.soon(function() {
+ var res = slave.getDB("admin").runCommand({ismaster: 1});
+ printjson(res);
+ return res['ok'] == 1 && res['ismaster'] == false;
+ });
+
+ // And we need to make sure that the replset comes back up
+ assert.soon(function() {
+ var res = new_master.getDB("admin").runCommand({replSetGetStatus: 1});
+ printjson( res );
+ return res.myState == 1;
+ });
+
+ // And that both slave nodes have all the updates
+ new_master = replTest.getMaster();
+ assert.eq( 1000 , new_master.getDB( "bar" ).runCommand( { count:"bar"} ).n , "assumption 2")
+ replTest.awaitReplication();
+
+ slaves = replTest.liveNodes.slaves;
+ assert( slaves.length == 2, "Expected 2 slaves but length was " + slaves.length );
+ slaves.forEach(function(slave) {
+ slave.setSlaveOk();
+ var count = slave.getDB("bar").runCommand({count: "bar"});
+ printjson( count );
+ assert.eq( 1000 , count.n , "slave count wrong: " + slave );
+ });
+
+ // Shut down the set and finish the test.
+ replTest.stopSet( signal );
+}
+
+doTest( 15 );
diff --git a/jstests/replsets/replset2.js b/jstests/replsets/replset2.js
new file mode 100644
index 0000000..eaa35ee
--- /dev/null
+++ b/jstests/replsets/replset2.js
@@ -0,0 +1,111 @@
+
+doTest = function( signal ) {
+
+ // FAILING TEST
+ // See below:
+
+ // Test replication with getLastError
+
+ // Replica set testing API
+ // Create a new replica set test. Specify set name and the number of nodes you want.
+ var replTest = new ReplSetTest( {name: 'testSet', nodes: 3} );
+
+ // call startSet() to start each mongod in the replica set
+ // this returns a list of nodes
+ var nodes = replTest.startSet();
+
+ // Call initiate() to send the replSetInitiate command
+ // This will wait for initiation
+ replTest.initiate();
+
+ // Call getMaster to return a reference to the node that's been
+ // elected master.
+ var master = replTest.getMaster();
+
+ // Wait for replication to a single node
+ master.getDB("test").bar.insert({n: 1});
+
+ // Wait for initial sync
+ replTest.awaitReplication();
+
+ var slaves = replTest.liveNodes.slaves;
+ slaves.forEach(function(slave) { slave.setSlaveOk(); });
+
+ var testDB = "repl-test";
+
+ var failed = false;
+ var callGetLastError = function(w, timeout, db) {
+ var result = master.getDB(db).getLastErrorObj( w , timeout );
+ printjson( result );
+ if(result['ok'] != 1) {
+ print("FAILURE");
+ failed = true;
+ }
+ }
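+
+ // w:3 asks the master to wait until all three set members have the write,
+ // or until `timeout` ms pass -- a sketch of the getLastErrorObj( w , timeout )
+ // semantics this helper assumes, not an upstream reference.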
+
+ // Test getlasterror with multiple inserts
+ // TEST FAILS HERE
+ print("**** Try inserting a multiple records -- first insert ****")
+ master.getDB(testDB).foo.insert({n: 1});
+ master.getDB(testDB).foo.insert({n: 2});
+ master.getDB(testDB).foo.insert({n: 3});
+ callGetLastError(3, 10000, testDB);
+
+ print("**** TEMP 1a ****")
+
+ m1 = master.getDB(testDB).foo.findOne({n: 1});
+ printjson( m1 );
+ assert( m1['n'] == 1 , "Failed to save to master on multiple inserts");
+
+ print("**** TEMP 1b ****")
+
+ var s0 = slaves[0].getDB(testDB).foo.findOne({n: 1});
+ assert( s0['n'] == 1 , "Failed to replicate to slave 0 on multiple inserts");
+
+ var s1 = slaves[1].getDB(testDB).foo.findOne({n: 1});
+ assert( s1['n'] == 1 , "Failed to replicate to slave 1 on multiple inserts");
+
+
+ // Test getlasterror with a simple insert
+ print("**** Try inserting a single record ****")
+ master.getDB(testDB).dropDatabase();
+ master.getDB(testDB).foo.insert({n: 1});
+ callGetLastError(3, 10000, testDB);
+
+ m1 = master.getDB(testDB).foo.findOne({n: 1});
+ printjson( m1 );
+ assert( m1['n'] == 1 , "Failed to save to master");
+
+
+ var s0 = slaves[0].getDB(testDB).foo.findOne({n: 1});
+ assert( s0['n'] == 1 , "Failed to replicate to slave 0");
+
+ var s1 = slaves[1].getDB(testDB).foo.findOne({n: 1});
+ assert( s1['n'] == 1 , "Failed to replicate to slave 1");
+
+
+ // Test getlasterror with large insert
+ print("**** Try inserting many records ****")
+ bigData = new Array(2000).toString()
+ for(var n=0; n<1000; n++) {
+ master.getDB(testDB).baz.insert({n: n, data: bigData});
+ }
+ callGetLastError(3, 60000, testDB);
+
+ var verifyReplication = function(nodeName, collection) {
+ data = collection.findOne({n: 1});
+ assert( data['n'] == 1 , "Failed to save to " + nodeName);
+ data = collection.findOne({n: 999});
+ assert( data['n'] == 999 , "Failed to save to " + nodeName);
+ }
+
+ verifyReplication("master", master.getDB(testDB).baz);
+ verifyReplication("slave 0", slaves[0].getDB(testDB).baz);
+ verifyReplication("slave 1", slaves[1].getDB(testDB).baz);
+
+ assert( failed == false, "Replication with getLastError failed. See errors." );
+
+ replTest.stopSet( signal );
+}
+
+doTest( 15 );
diff --git a/jstests/replsets/replset3.js b/jstests/replsets/replset3.js
new file mode 100644
index 0000000..8126b9d
--- /dev/null
+++ b/jstests/replsets/replset3.js
@@ -0,0 +1,56 @@
+
+doTest = function( signal ) {
+
+ // Test replica set step down
+
+ // Replica set testing API
+ // Create a new replica set test. Specify set name and the number of nodes you want.
+ var replTest = new ReplSetTest( {name: 'testSet', nodes: 3} );
+
+ // call startSet() to start each mongod in the replica set
+ // this returns a list of nodes
+ var nodes = replTest.startSet();
+
+ // Call initiate() to send the replSetInitiate command
+ // This will wait for initiation
+ replTest.initiate();
+
+ // Get master node
+ var master = replTest.getMaster();
+
+ // Write some data to master
+ // NOTE: this test fails unless we write some data.
+ master.getDB("foo").foo.save({a: 1});
+ master.getDB("foo").runCommand({getlasterror: 1, w:3, wtimeout: 20000});
+
+ // Step down master
+ master.getDB("admin").runCommand({replSetStepDown: true});
+
+ try {
+ var new_master = replTest.getMaster();
+ }
+ catch( err ) {
+ throw( "Could not elect new master before timeout." );
+ }
+
+ assert( master != new_master, "Old master shouldn't be equal to new master." );
+
+ // Make sure that slaves are still up
+ var result = new_master.getDB("admin").runCommand({replSetGetStatus: 1});
+ assert( result['ok'] == 1, "Could not verify that slaves were still up: " + tojson( result ) );
+
+ slaves = replTest.liveNodes.slaves;
+ assert.soon(function() {
+ res = slaves[0].getDB("admin").runCommand({replSetGetStatus: 1})
+ return res.myState == 2;
+ }, "Slave 0 state not ready.");
+
+ assert.soon(function() {
+ res = slaves[1].getDB("admin").runCommand({replSetGetStatus: 1})
+ return res.myState == 2;
+ }, "Slave 1 state not ready.");
+
+ replTest.stopSet( 15 );
+}
+
+doTest( 15 );
diff --git a/jstests/replsets/replset4.js b/jstests/replsets/replset4.js
new file mode 100644
index 0000000..4f6c454
--- /dev/null
+++ b/jstests/replsets/replset4.js
@@ -0,0 +1,29 @@
+doTest = function( signal ) {
+
+ // Test orphaned master steps down
+ var replTest = new ReplSetTest( {name: 'testSet', nodes: 3} );
+
+ replTest.startSet();
+ replTest.initiate();
+
+ var master = replTest.getMaster();
+
+ // Kill both slaves, simulating a network partition
+ var slaves = replTest.liveNodes.slaves;
+ for(var i=0; i<slaves.length; i++) {
+ var slave_id = replTest.getNodeId(slaves[i]);
+ replTest.stop( slave_id );
+ }
+
+ var result = master.getDB("admin").runCommand({ismaster: 1});
+ printjson( result );
+ assert.soon(function() {
+ var result = master.getDB("admin").runCommand({ismaster: 1});
+ printjson( result );
+ return (result['ok'] == 1 && result['ismaster'] == false);
+ }, "Master fails to step down when orphaned.");
+
+ replTest.stopSet( signal );
+}
+
+doTest( 15 );
diff --git a/jstests/replsets/replset_remove_node.js b/jstests/replsets/replset_remove_node.js
new file mode 100644
index 0000000..e06a951
--- /dev/null
+++ b/jstests/replsets/replset_remove_node.js
@@ -0,0 +1,57 @@
+doTest = function( signal ) {
+
+ // Make sure that we can manually shut down and remove a
+ // slave from the configuration.
+
+ // Create a new replica set test. Specify set name and the number of nodes you want.
+ var replTest = new ReplSetTest( {name: 'testSet', nodes: 3} );
+
+ // call startSet() to start each mongod in the replica set
+ // this returns a list of nodes
+ var nodes = replTest.startSet();
+
+ // Call initiate() to send the replSetInitiate command
+ // This will wait for initiation
+ replTest.initiate();
+
+ // Call getMaster to return a reference to the node that's been
+ // elected master.
+ var master = replTest.getMaster();
+
+ // Reconfigure the set, removing the unwanted node
+ slaveId = replTest.getNodeId( replTest.liveNodes.slaves[0] );
+
+ // Shut down the unwanted node
+ replTest.stop( slaveId );
+
+ // Remove that node from the configuration
+ replTest.remove( slaveId );
+
+ // Then, reinitiate
+ replTest.reInitiate();
+
+ // Make sure that a new master comes up
+ master = replTest.getMaster();
+ slaves = replTest.liveNodes.slaves;
+
+ // Do a status check on each node
+ // Master should be set to 1 (primary)
+ assert.soon(function() {
+ stat = master.getDB("admin").runCommand({replSetGetStatus: 1});
+ printjson( stat );
+ return stat.myState == 1;
+ }, "Master failed to come up as master.", 60000);
+
+ // Slaves to be set to 2 (secondary)
+ assert.soon(function() {
+ stat = slaves[0].getDB("admin").runCommand({replSetGetStatus: 1});
+ return stat.myState == 2;
+ }, "Slave failed to come up as slave.", 60000);
+
+ assert.soon(function() {
+ stat = slaves[0].getDB("admin").runCommand({replSetGetStatus: 1});
+ return stat.members.length == 2;
+ }, "Wrong number of members", 60000);
+}
+
+doTest( 15 );
diff --git a/jstests/replsets/replsetadd.js b/jstests/replsets/replsetadd.js
new file mode 100644
index 0000000..673e1d7
--- /dev/null
+++ b/jstests/replsets/replsetadd.js
@@ -0,0 +1,31 @@
+
+doTest = function( signal ) {
+ // Test add node
+
+ var replTest = new ReplSetTest( {name: 'testSet', nodes: 0} );
+
+ var first = replTest.add();
+
+ // Initiate replica set
+ assert.soon(function() {
+ var res = first.getDB("admin").runCommand({replSetInitiate: null});
+ return res['ok'] == 1;
+ });
+
+ // Get status
+ assert.soon(function() {
+ var result = first.getDB("admin").runCommand({replSetGetStatus: true});
+ return result['ok'] == 1;
+ });
+
+ // Start a second node
+ var second = replTest.add();
+
+ // Add the second node.
+ // This runs the equivalent of rs.add(newNode);
+ replTest.reInitiate();
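+
+ // A rough manual equivalent (illustrative sketch only; newNodeHost is a
+ // hypothetical placeholder): fetch the config, append the member, bump the
+ // version, and reconfigure:
+ // var conf = first.getDB("local").system.replset.findOne();
+ // conf.members.push({ _id: conf.members.length, host: newNodeHost });
+ // conf.version++;
+ // first.getDB("admin").runCommand({ replSetReconfig: conf });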
+
+ replTest.stopSet( signal );
+}
+
+doTest( 15 );
diff --git a/jstests/replsets/replsetarb1.js b/jstests/replsets/replsetarb1.js
new file mode 100644
index 0000000..a323290
--- /dev/null
+++ b/jstests/replsets/replsetarb1.js
@@ -0,0 +1,33 @@
+// FAILING TEST
+// no primary is ever elected if the first server is an arbiter
+
+doTest = function( signal ) {
+
+ var replTest = new ReplSetTest( {name: 'unicomplex', nodes: 3} );
+ var nodes = replTest.nodeList();
+
+ print(tojson(nodes));
+
+ var conns = replTest.startSet();
+ var r = replTest.initiate({"_id" : "unicomplex",
+ "members" : [
+ {"_id" : 0, "host" : nodes[0], "arbiterOnly" : true},
+ {"_id" : 1, "host" : nodes[1]},
+ {"_id" : 2, "host" : nodes[2]}]});
+
+ // Make sure we have a master
+ // Neither this
+ var master = replTest.getMaster();
+
+ // Make sure we have an arbiter
+ // Nor this will succeed
+ assert.soon(function() {
+ res = conns[0].getDB("admin").runCommand({replSetGetStatus: 1});
+ printjson(res);
+ return res.myState == 7;
+ }, "Aribiter failed to initialize.");
+
+ replTest.stopSet( signal );
+}
+
+// doTest( 15 );
diff --git a/jstests/replsets/replsetarb2.js b/jstests/replsets/replsetarb2.js
new file mode 100644
index 0000000..0dd8a3d
--- /dev/null
+++ b/jstests/replsets/replsetarb2.js
@@ -0,0 +1,45 @@
+// Election when master fails and remaining nodes are an arbiter and a slave.
+// Note that in this scenario, the arbiter needs two votes.
+
+doTest = function( signal ) {
+
+ var replTest = new ReplSetTest( {name: 'unicomplex', nodes: 3} );
+ var nodes = replTest.nodeList();
+
+ print(tojson(nodes));
+
+ var conns = replTest.startSet();
+ var r = replTest.initiate({"_id" : "unicomplex",
+ "members" : [
+ {"_id" : 0, "host" : nodes[0] },
+ {"_id" : 1, "host" : nodes[1], "arbiterOnly" : true, "votes": 2},
+ {"_id" : 2, "host" : nodes[2] }]});
+
+ // Make sure we have a master
+ var master = replTest.getMaster();
+
+ // Make sure we have an arbiter
+ assert.soon(function() {
+ res = conns[1].getDB("admin").runCommand({replSetGetStatus: 1});
+ printjson(res);
+ return res.myState == 7;
+ }, "Aribiter failed to initialize.");
+
+ // Wait for initial replication
+ master.getDB("foo").foo.insert({a: "foo"});
+ replTest.awaitReplication();
+
+ // Now kill the original master
+ mId = replTest.getNodeId( master );
+ replTest.stop( mId );
+
+ // And make sure that the slave is promoted
+ new_master = replTest.getMaster();
+
+ newMasterId = replTest.getNodeId( new_master );
+ assert( newMasterId == 2, "Slave wasn't promoted to new master");
+
+ replTest.stopSet( signal );
+}
+
+doTest( 15 );
diff --git a/jstests/replsets/replsetprio1.js b/jstests/replsets/replsetprio1.js
new file mode 100644
index 0000000..a002476
--- /dev/null
+++ b/jstests/replsets/replsetprio1.js
@@ -0,0 +1,53 @@
+// FAILING TEST
+// should check that election happens in priority order
+
+doTest = function( signal ) {
+
+ var replTest = new ReplSetTest( {name: 'testSet', nodes: 3} );
+ var nodes = replTest.nodeList();
+
+ replTest.startSet();
+ replTest.node[0].initiate({"_id" : "unicomplex",
+ "members" : [
+ {"_id" : 0, "host" : nodes[0], "priority" : 1},
+ {"_id" : 1, "host" : nodes[1], "priority" : 2},
+ {"_id" : 2, "host" : nodes[2], "priority" : 3}]});
+
+ sleep(10000);
+
+ // 2 should be master
+ var m3 = replTest.nodes[2].runCommand({ismaster:1})
+
+ // FAILS: node[0] is elected master, regardless of priority
+ assert(m3.ismaster, 'highest priority is master');
+
+ // kill 2, 1 should take over
+ var m3Id = replTest.getNodeId(nodes[2]);
+ replTest.stop(m3Id);
+
+ sleep(10000);
+
+ var m2 = replTest.nodes[1].runCommand({ismaster:1})
+ assert(m2.ismaster, 'node 2 is master');
+
+ // bring 2 back up, nothing should happen
+ replTest.start(m3Id);
+
+ sleep(10000);
+
+ m2 = replTest.nodes[1].runCommand({ismaster:1})
+ assert(m2.ismaster, 'node 2 is still master');
+
+ // kill 1, 2 should become master
+ var m2Id = replTest.getNodeId(nodes[1]);
+ replTest.stop(m2Id);
+
+ sleep(10000);
+
+ m3 = replTest.nodes[2].runCommand({ismaster:1})
+ assert(m3.ismaster, 'node 3 is master');
+
+ replTest.stopSet( signal );
+}
+
+//doTest( 15 );
diff --git a/jstests/replsets/replsetrestart1.js b/jstests/replsets/replsetrestart1.js
new file mode 100644
index 0000000..65adaf4
--- /dev/null
+++ b/jstests/replsets/replsetrestart1.js
@@ -0,0 +1,57 @@
+doTest = function( signal ) {
+
+ // Make sure that we can restart a replica set completely
+
+ // Create a new replica set test. Specify set name and the number of nodes you want.
+ var replTest = new ReplSetTest( {name: 'testSet', nodes: 3} );
+
+ // call startSet() to start each mongod in the replica set
+ // this returns a list of nodes
+ var nodes = replTest.startSet();
+
+ // Call initiate() to send the replSetInitiate command
+ // This will wait for initiation
+ replTest.initiate();
+
+ // Call getMaster to return a reference to the node that's been
+ // elected master.
+ var master = replTest.getMaster();
+
+ // Now we're going to shut down all nodes
+ mId = replTest.getNodeId( master );
+ s1Id = replTest.getNodeId( replTest.liveNodes.slaves[0] );
+ s2Id = replTest.getNodeId( replTest.liveNodes.slaves[1] );
+
+ replTest.stop( mId );
+ replTest.stop( s1Id );
+ replTest.stop( s2Id );
+
+ // Now let's restart these nodes
+ replTest.restart( mId );
+ replTest.restart( s1Id );
+ replTest.restart( s2Id );
+
+ // Make sure that a new master comes up
+ master = replTest.getMaster();
+ slaves = replTest.liveNodes.slaves;
+
+ // Do a status check on each node
+ // Master should be set to 1 (primary)
+ assert.soon(function() {
+ stat = master.getDB("admin").runCommand({replSetGetStatus: 1});
+ return stat.myState == 1;
+ });
+
+ // Slaves to be set to 2 (secondary)
+ assert.soon(function() {
+ stat = slaves[0].getDB("admin").runCommand({replSetGetStatus: 1});
+ return stat.myState == 2;
+ });
+
+ assert.soon(function() {
+ stat = slaves[1].getDB("admin").runCommand({replSetGetStatus: 1});
+ return stat.myState == 2;
+ });
+}
+
+doTest( 15 );
diff --git a/jstests/replsets/replsetrestart2.js b/jstests/replsets/replsetrestart2.js
new file mode 100644
index 0000000..324bd37
--- /dev/null
+++ b/jstests/replsets/replsetrestart2.js
@@ -0,0 +1,46 @@
+// config saved on shutdown
+
+var compare_configs = function(c1, c2) {
+ assert(c1.version == c2.version, 'version same');
+ assert(c1._id == c2._id, '_id same');
+
+ printjson(c1);
+ printjson(c2);
+
+ for (var i in c1.members) {
+ assert(c2.members[i] !== undefined, 'field '+i+' exists in both configs');
+ assert(c1.members[i]._id == c2.members[i]._id, 'id is equal in both configs');
+ assert(c1.members[i].host == c2.members[i].host, 'host is equal in both configs');
+ }
+}
+
+doTest = function( signal ) {
+ var replTest = new ReplSetTest( {name: 'testSet', nodes: 3} );
+ replTest.startSet();
+
+ sleep(5000);
+
+ replTest.initiate();
+
+ sleep(5000);
+
+ var master = replTest.getMaster();
+ var config = master.getDB("local").system.replset.findOne();
+
+ replTest.stopSet( signal , true );
+
+ replTest.restart(0);
+ replTest.restart(1);
+ replTest.restart(2);
+
+ sleep(5000);
+
+ master = replTest.getMaster();
+ var config2 = master.getDB("local").system.replset.findOne();
+
+ compare_configs(config, config2);
+
+ replTest.stopSet( signal );
+}
+
+doTest( 15 );
diff --git a/jstests/replsets/rollback.js b/jstests/replsets/rollback.js
new file mode 100644
index 0000000..f072d61
--- /dev/null
+++ b/jstests/replsets/rollback.js
@@ -0,0 +1,129 @@
+// test rollback in replica sets
+
+// try running as :
+//
+// mongo --nodb rollback.js | tee out | grep -v ^m31
+//
+
+var debugging = 0;
+
+function pause(s) {
+ print(s);
+ while (debugging) {
+ sleep(3000);
+ print(s);
+ }
+}
+
+function deb(obj) {
+ if( debugging ) {
+ print("\n\n\n" + obj + "\n\n");
+ }
+}
+
+w = 0;
+
+function wait(f) {
+ w++;
+ var n = 0;
+ while (!f()) {
+ if( n % 4 == 0 )
+ print("waiting " + w);
+ if (++n == 4) {
+ print("" + f);
+ }
+ sleep(1000);
+ }
+}
+
+doTest = function (signal) {
+
+ var replTest = new ReplSetTest({ name: 'unicomplex', nodes: 3 });
+ var nodes = replTest.nodeList();
+ //print(tojson(nodes));
+
+ var conns = replTest.startSet();
+ var r = replTest.initiate({ "_id": "unicomplex",
+ "members": [
+ { "_id": 0, "host": nodes[0] },
+ { "_id": 1, "host": nodes[1] },
+ { "_id": 2, "host": nodes[2], arbiterOnly: true}]
+ });
+
+ // Make sure we have a master
+ var master = replTest.getMaster();
+ a_conn = conns[0];
+ A = a_conn.getDB("admin");
+ b_conn = conns[1];
+ a_conn.setSlaveOk();
+ b_conn.setSlaveOk();
+ B = b_conn.getDB("admin");
+ assert(master == conns[0], "conns[0] assumed to be master");
+ assert(a_conn == master);
+
+ //deb(master);
+
+ // Make sure we have an arbiter
+ assert.soon(function () {
+ res = conns[2].getDB("admin").runCommand({ replSetGetStatus: 1 });
+ return res.myState == 7;
+ }, "Arbiter failed to initialize.");
+
+ // Wait for initial replication
+ var a = a_conn.getDB("foo");
+ var b = b_conn.getDB("foo");
+ a.bar.insert({ q: 1, a: "foo" });
+ a.bar.insert({ q: 2, a: "foo", x: 1 });
+ a.bar.insert({ q: 3, bb: 9, a: "foo" });
+
+ assert(a.bar.count() == 3, "t.count");
+
+ // wait for secondary to get this data
+ wait(function () { return b.bar.count() == 3; });
+
+ A.runCommand({ replSetTest: 1, blind: true });
+ wait(function () { return B.isMaster().ismaster; });
+
+ b.bar.insert({ q: 4 });
+ b.bar.insert({ q: 5 });
+ b.bar.insert({ q: 6 });
+ assert(b.bar.count() == 6, "u.count");
+
+ // a should not have the new data as it was in blind state.
+ B.runCommand({ replSetTest: 1, blind: true });
+ A.runCommand({ replSetTest: 1, blind: false });
+ wait(function () { return !B.isMaster().ismaster; });
+ wait(function () { return A.isMaster().ismaster; });
+
+ assert(a.bar.count() == 3, "t is 3");
+ a.bar.insert({ q: 7 });
+ a.bar.insert({ q: 8 });
+ {
+ assert(a.bar.count() == 5);
+ var x = a.bar.find().toArray();
+ assert(x[0].q == 1, '1');
+ assert(x[1].q == 2, '2');
+ assert(x[2].q == 3, '3');
+ assert(x[3].q == 7, '7');
+ assert(x[4].q == 8, '8');
+ }
+
+ // A is 1 2 3 7 8
+ // B is 1 2 3 4 5 6
+
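+ // Expected outcome (reasoning sketch): once B rejoins with A as primary, B
+ // rolls back its divergent writes (q: 4, 5, 6) and then syncs A's q: 7, 8,
+ // so both nodes should converge on q = 1 2 3 7 8.
+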
+ // bring B back online
+ B.runCommand({ replSetTest: 1, blind: false });
+
+ wait(function () { return B.isMaster().ismaster || B.isMaster().secondary; });
+
+ // everyone is up here...
+ assert(A.isMaster().ismaster || A.isMaster().secondary, "A up");
+ assert(B.isMaster().ismaster || B.isMaster().secondary, "B up");
+
+ friendlyEqual(a.bar.find().sort({ _id: 1 }).toArray(), b.bar.find().sort({ _id: 1 }).toArray(), "server data sets do not match");
+
+ pause("SUCCESS");
+ replTest.stopSet(signal);
+}
+
+doTest( 15 );
diff --git a/jstests/replsets/rollback2.js b/jstests/replsets/rollback2.js
new file mode 100644
index 0000000..f9c48ff
--- /dev/null
+++ b/jstests/replsets/rollback2.js
@@ -0,0 +1,199 @@
+// test rollback in replica sets
+
+// try running as :
+//
+// mongo --nodb rollback.js | tee out | grep -v ^m31
+//
+
+var debugging = 0;
+
+function pause(s) {
+ print(s);
+ while (debugging) {
+ sleep(3000);
+ print(s);
+ }
+}
+
+function deb(obj) {
+ if( debugging ) {
+ print("\n\n\n" + obj + "\n\n");
+ }
+}
+
+w = 0;
+
+function wait(f) {
+ w++;
+ var n = 0;
+ while (!f()) {
+ if( n % 4 == 0 )
+ print("waiting " + w);
+ if (++n == 4) {
+ print("" + f);
+ }
+ sleep(1000);
+ }
+}
+
+function dbs_match(a, b) {
+ print("dbs_match");
+
+ var ac = a.system.namespaces.find().sort({name:1}).toArray();
+ var bc = b.system.namespaces.find().sort({name:1}).toArray();
+ if (!friendlyEqual(ac, bc)) {
+ print("dbs_match: namespaces don't match");
+ print("\n\n");
+ printjson(ac);
+ print("\n\n");
+ printjson(bc);
+ print("\n\n");
+ return false;
+ }
+
+ var c = a.getCollectionNames();
+ for( var i in c ) {
+ print("checking " + c[i]);
+ if( !friendlyEqual( a[c[i]].find().sort({_id:1}).toArray(), b[c[i]].find().sort({_id:1}).toArray() ) ) {
+ print("dbs_match: collections don't match " + c[i]);
+ return false;
+ }
+ }
+ return true;
+}
+
+/* these writes will be initial data and replicate everywhere. */
+function doInitialWrites(db) {
+ t = db.bar;
+ t.insert({ q:0});
+ t.insert({ q: 1, a: "foo" });
+ t.insert({ q: 2, a: "foo", x: 1 });
+ t.insert({ q: 3, bb: 9, a: "foo" });
+ t.insert({ q: 40, a: 1 });
+ t.insert({ q: 40, a: 2 });
+ t.insert({ q: 70, txt: 'willremove' });
+
+ db.createCollection("kap", { capped: true, size: 5000 });
+ db.kap.insert({ foo: 1 })
+
+ // going back to empty on capped is a special case and must be tested
+ db.createCollection("kap2", { capped: true, size: 5501 });
+}
+
+/* these writes on one primary only and will be rolled back. */
+function doItemsToRollBack(db) {
+ t = db.bar;
+ t.insert({ q: 4 });
+ t.update({ q: 3 }, { q: 3, rb: true });
+
+ t.remove({ q: 40 }); // multi remove test
+
+ t.update({ q: 2 }, { q: 39, rb: true });
+
+ // rolling back a delete will involve reinserting the item(s)
+ t.remove({ q: 1 });
+
+ t.update({ q: 0 }, { $inc: { y: 1} });
+
+ db.kap.insert({ foo: 2 })
+ db.kap2.insert({ foo: 2 })
+
+ // create a collection (need to roll back the whole thing)
+ db.newcoll.insert({ a: true });
+
+ // create a new empty collection (need to roll back the whole thing)
+ db.createCollection("abc");
+}
+
+function doWritesToKeep2(db) {
+ t = db.bar;
+ t.insert({ txt: 'foo' });
+ t.remove({ q: 70 });
+ t.update({ q: 0 }, { $inc: { y: 33} });
+}
+
+function verify(db) {
+ print("verify");
+ t = db.bar;
+ assert(t.find({ q: 1 }).count() == 1);
+ assert(t.find({ txt: 'foo' }).count() == 1);
+ assert(t.find({ q: 4 }).count() == 0);
+}
+
+doTest = function (signal) {
+
+ var replTest = new ReplSetTest({ name: 'unicomplex', nodes: 3 });
+ var nodes = replTest.nodeList();
+ //print(tojson(nodes));
+
+ var conns = replTest.startSet();
+ var r = replTest.initiate({ "_id": "unicomplex",
+ "members": [
+ { "_id": 0, "host": nodes[0] },
+ { "_id": 1, "host": nodes[1] },
+ { "_id": 2, "host": nodes[2], arbiterOnly: true}]
+ });
+
+ // Make sure we have a master
+ var master = replTest.getMaster();
+ a_conn = conns[0];
+ A = a_conn.getDB("admin");
+ b_conn = conns[1];
+ a_conn.setSlaveOk();
+ b_conn.setSlaveOk();
+ B = b_conn.getDB("admin");
+ assert(master == conns[0], "conns[0] assumed to be master");
+ assert(a_conn == master);
+
+ //deb(master);
+
+ // Make sure we have an arbiter
+ assert.soon(function () {
+ res = conns[2].getDB("admin").runCommand({ replSetGetStatus: 1 });
+ return res.myState == 7;
+ }, "Arbiter failed to initialize.");
+
+ // Wait for initial replication
+ var a = a_conn.getDB("foo");
+ var b = b_conn.getDB("foo");
+ doInitialWrites(a);
+
+ // wait for secondary to get this data
+ wait(function () { return b.bar.count() == a.bar.count(); });
+
+ A.runCommand({ replSetTest: 1, blind: true });
+ wait(function () { return B.isMaster().ismaster; });
+
+ doItemsToRollBack(b);
+
+ // a should not have the new data as it was in blind state.
+ B.runCommand({ replSetTest: 1, blind: true });
+ A.runCommand({ replSetTest: 1, blind: false });
+ wait(function () { return !B.isMaster().ismaster; });
+ wait(function () { return A.isMaster().ismaster; });
+
+ assert(a.bar.count() >= 1, "count check");
+ doWritesToKeep2(a);
+
+ // A is 1 2 3 7 8
+ // B is 1 2 3 4 5 6
+
+ // bring B back online
+ // as A is primary, B will roll back and then catch up
+ B.runCommand({ replSetTest: 1, blind: false });
+
+ wait(function () { return B.isMaster().ismaster || B.isMaster().secondary; });
+
+ // everyone is up here...
+ assert(A.isMaster().ismaster || A.isMaster().secondary, "A up");
+ assert(B.isMaster().ismaster || B.isMaster().secondary, "B up");
+
+ verify(a);
+
+ assert( dbs_match(a,b), "server data sets do not match after rollback, something is wrong");
+
+ pause("SUCCESS");
+ replTest.stopSet(signal);
+}
+
+doTest( 15 );
diff --git a/jstests/replsets/sync1.js b/jstests/replsets/sync1.js
new file mode 100644
index 0000000..0f7754e
--- /dev/null
+++ b/jstests/replsets/sync1.js
@@ -0,0 +1,192 @@
+// test rollback of replica sets
+
+var debugging=0;
+
+function pause(s) {
+ // for debugging just to keep processes running
+ print("\nsync1.js: " + s);
+ if (debugging) {
+ while (1) {
+ print("\nsync1.js: " + s);
+ sleep(4000);
+ }
+ }
+}
+
+doTest = function (signal) {
+
+ var replTest = new ReplSetTest({ name: 'testSet', nodes: 3 });
+ var nodes = replTest.startSet({ oplogSize: "40" });
+
+ sleep(5000);
+
+ print("\nsync1.js ********************************************************************** part 0");
+ replTest.initiate();
+
+ // get master
+ print("\nsync1.js ********************************************************************** part 1");
+ var master = replTest.getMaster();
+ print("\nsync1.js ********************************************************************** part 2");
+ var dbs = [master.getDB("foo")];
+
+ for (var i in nodes) {
+ if (nodes[i] + "" == master + "") {
+ continue;
+ }
+ dbs.push(nodes[i].getDB("foo"));
+ nodes[i].setSlaveOk();
+ }
+
+ print("\nsync1.js ********************************************************************** part 3");
+ dbs[0].bar.drop();
+
+ print("\nsync1.js ********************************************************************** part 4");
+ // slow things down a bit
+ dbs[0].bar.ensureIndex({ x: 1 });
+ dbs[0].bar.ensureIndex({ y: 1 });
+ dbs[0].bar.ensureIndex({ z: 1 });
+ dbs[0].bar.ensureIndex({ w: 1 });
+
+ var ok = false;
+ var inserts = 100000;
+
+ print("\nsync1.js ********************************************************************** part 5");
+
+ for (var i = 0; i < inserts; i++) {
+ dbs[0].bar.insert({ x: "foo" + i, y: "bar" + i, z: i, w: "biz baz bar boo" });
+ }
+
+ var status;
+ do {
+ sleep(1000);
+ status = dbs[0].getSisterDB("admin").runCommand({replSetGetStatus : 1});
+ } while(status.members[1].state != 2 && status.members[2].state != 2);
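+
+ // myState codes polled here and below (as assumed throughout these tests):
+ // 1 = PRIMARY, 2 = SECONDARY, 7 = ARBITER.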
+
+ print("\nsync1.js ********************************************************************** part 6");
+ dbs[0].getSisterDB("admin").runCommand({ replSetTest: 1, blind: true });
+
+ print("\nsync1.js ********************************************************************** part 7");
+
+ sleep(5000);
+
+ // yay! there are out-of-date nodes
+ var max1;
+ var max2;
+ var count = 0;
+ while( 1 ) {
+ try {
+ max1 = dbs[1].bar.find().sort({ z: -1 }).limit(1).next();
+ max2 = dbs[2].bar.find().sort({ z: -1 }).limit(1).next();
+ }
+ catch(e) {
+ print("\nsync1.js couldn't get max1/max2; retrying " + e);
+ sleep(2000);
+ count++;
+ if (count == 50) {
+ assert(false, "errored out 50 times");
+ }
+ continue;
+ }
+ break;
+ }
+
+ print("\nsync1.js ********************************************************************** part 8");
+
+ if (max1.z == (inserts-1) && max2.z == (inserts-1)) {
+ print("\nsync1.js try increasing # if inserts and running again");
+ replTest.stopSet(signal);
+ return;
+ }
+
+ // wait for a new master to be elected
+ sleep(5000);
+
+ // figure out who is master now
+ var newMaster = replTest.getMaster();
+
+ print("\nsync1.js ********************************************************************** part 9");
+
+ print("\nsync1.js \nsync1.js ********************************************************************** part 9 **********************************************");
+ assert(newMaster + "" != master + "", "new master is " + newMaster + ", old master was " + master);
+ print("\nsync1.js new master is " + newMaster + ", old master was " + master);
+
+ count = 0;
+ do {
+ try {
+ max1 = dbs[1].bar.find().sort({ z: -1 }).limit(1).next();
+ max2 = dbs[2].bar.find().sort({ z: -1 }).limit(1).next();
+ }
+ catch( e ) {
+ print("\nsync1.js: exception querying; will sleep and try again " + e);
+ sleep(2000);
+ continue;
+ }
+
+ print("\nsync1.js waiting for match " + count + " " + Date() + " z[1]:" + max1.z + " z[2]:" + max2.z);
+
+ // printjson(max1);
+ // printjson(max2);
+
+ sleep(2000);
+
+ count++;
+ if (count == 100) {
+ pause("fail phase 1");
+ assert(false, "replsets/\nsync1.js fails timing out");
+ replTest.stopSet(signal);
+ return;
+ }
+ } while (max1.z != max2.z);
+
+ // okay, now they're caught up. We have a max:
+ var max = max1.z;
+
+ print("\nsync1.js ********************************************************************** part 10");
+
+ // now, let's see if rollback works
+ var result = dbs[0].getSisterDB("admin").runCommand({ replSetTest: 1, blind: false });
+ dbs[0].getMongo().setSlaveOk();
+
+ printjson(result);
+ sleep(5000);
+
+ // FAIL! This never resyncs
+ // now this should resync
+ print("\nsync1.js ********************************************************************** part 11");
+ var max0 = null;
+ count = 0;
+ do {
+ try {
+ max0 = dbs[0].bar.find().sort({ z: -1 }).limit(1).next();
+ }
+ catch(e) {
+ print("\nsync1.js part 11 exception on bar.find() will sleep and try again " + e);
+ sleep(2000);
+ continue;
+ }
+
+ printjson(max);
+ printjson(max0);
+ print("\nsync1.js part 11 waiting for match " + count + " " + Date() + " z[0]:" + max0.z + " z:" + max);
+
+ sleep(2000);
+
+ count++;
+ if (count == 100) {
+ pause("fail part 11");
+ assert(false, "replsets/\nsync1.js fails timing out");
+ replTest.stopSet(signal);
+ return;
+ }
+ print("||||| count:" + count);
+ printjson(max0);
+ } while (! max0 || max0.z != max);
+
+ print("\nsync1.js ********************************************************************** part 12");
+ pause("\nsync1.js success");
+ replTest.stopSet(signal);
+}
+
+if( 1 || debugging ) {
+ doTest( 15 );
+}
diff --git a/jstests/replsets/twosets.js b/jstests/replsets/twosets.js
new file mode 100644
index 0000000..7cf367b
--- /dev/null
+++ b/jstests/replsets/twosets.js
@@ -0,0 +1,36 @@
+// add a node from a different set to the current set
+// I don't know what should happen here.
+
+doTest = function( signal ) {
+
+ var orig = new ReplSetTest( {name: 'testSet', nodes: 3} );
+ orig.startSet();
+
+ var interloper = new ReplSetTest( {name: 'testSet', nodes: 3, startPort : 31003} );
+ interloper.startSet();
+
+ sleep(5000);
+
+ orig.initiate();
+ interloper.initiate();
+
+ sleep(5000);
+
+ var master = orig.getMaster();
+
+ var conf = master.getDB("local").system.replset.findOne();
+
+ var nodes = interloper.nodeList();
+ var host = nodes[0];
+ var id = conf.members.length;
+ conf.members.push({_id : id, host : host});
+ conf.version++;
+
+ var result = master.getDB("admin").runCommand({replSetReconfig : conf});
+
+ // now... stuff should blow up?
+
+ sleep(10);
+}
+
+doTest(15); \ No newline at end of file
diff --git a/jstests/rs/rs_basic.js b/jstests/rs/rs_basic.js
new file mode 100644
index 0000000..08de689
--- /dev/null
+++ b/jstests/rs/rs_basic.js
@@ -0,0 +1,177 @@
+// rs_basic.js
+
+load("../../jstests/rs/test_framework.js");
+
+function go() {
+ assert(__nextPort == 27000, "_nextPort==27000");
+
+ a = null;
+ try {
+ a = new Mongo("localhost:27000");
+ print("using already open mongod on port 27000 -- presume you are debugging or something. should start empty.");
+ __nextPort++;
+ }
+ catch (e) {
+ a = rs_mongod();
+ }
+
+ b = rs_mongod();
+
+ x = a.getDB("admin");
+ y = b.getDB("admin");
+ memb = [];
+ memb[0] = x;
+ memb[1] = y;
+
+ print("rs_basic.js go(): started 2 servers");
+
+ cfg = { _id: 'asdf', members: [] };
+ var hn = hostname();
+ cfg.members[0] = { _id: 0, host: hn + ":27000" };
+ cfg.members[1] = { _id: 1, host: hn + ":27001" };
+
+ print("cfg=" + tojson(cfg));
+}
+
+function init(server) {
+ var i = server;
+ //i = Random.randInt(2); // a random member of the set
+ var m = memb[i];
+ assert(!m.ismaster(), "not ismaster");
+ var res = m.runCommand({ replSetInitiate: cfg });
+ return res;
+}
+
+_path = '../../db/Debug/';
+print("_path var set to " + _path);
+
+print("go() to run");
+print("init() to initiate");
+
+
+/*
+var rt = new ReplTest( "basic1" );
+
+m = rt.start( true );
+s = rt.start( false );
+
+function block(){
+ am.runCommand( { getlasterror : 1 , w : 2 , wtimeout : 3000 } )
+}
+
+am = m.getDB( "foo" );
+as = s.getDB( "foo" );
+
+function check( note ){
+ var start = new Date();
+ var x,y;
+ while ( (new Date()).getTime() - start.getTime() < 30000 ){
+ x = am.runCommand( "dbhash" );
+ y = as.runCommand( "dbhash" );
+ if ( x.md5 == y.md5 )
+ return;
+ sleep( 200 );
+ }
+ assert.eq( x.md5 , y.md5 , note );
+}
+
+am.a.save( { x : 1 } );
+check( "A" );
+
+am.a.save( { x : 5 } );
+
+am.a.update( {} , { $inc : { x : 1 } } );
+check( "B" );
+
+am.a.update( {} , { $inc : { x : 1 } } , false , true );
+check( "C" );
+
+// ----- check features -------
+
+// map/reduce
+am.mr.insert( { tags : [ "a" ] } )
+am.mr.insert( { tags : [ "a" , "b" ] } )
+am.getLastError();
+check( "mr setup" );
+
+m = function(){
+ for ( var i=0; i<this.tags.length; i++ ){
+ print( "\t " + i );
+ emit( this.tags[i] , 1 );
+ }
+}
+
+r = function( key , v ){
+ return Array.sum( v );
+}
+
+correct = { a : 2 , b : 1 };
+
+function checkMR( t ){
+ var res = t.mapReduce( m , r );
+ assert.eq( correct , res.convertToSingleObject() , "checkMR: " + tojson( t ) );
+}
+
+function checkNumCollections( msg , diff ){
+ if ( ! diff ) diff = 0;
+ var m = am.getCollectionNames();
+ var s = as.getCollectionNames();
+ assert.eq( m.length + diff , s.length , "lengths bad \n" + tojson( m ) + "\n" + tojson( s ) );
+}
+
+checkNumCollections( "MR1" );
+checkMR( am.mr );
+checkMR( as.mr );
+checkNumCollections( "MR2" );
+
+block();
+checkNumCollections( "MR3" );
+
+var res = am.mr.mapReduce( m , r , { out : "xyz" } );
+block();
+
+checkNumCollections( "MR4" );
+
+
+t = am.rpos;
+t.insert( { _id : 1 , a : [ { n : "a" , c : 1 } , { n : "b" , c : 1 } , { n : "c" , c : 1 } ] , b : [ 1 , 2 , 3 ] } )
+block();
+check( "after pos 1 " );
+
+t.update( { "a.n" : "b" } , { $inc : { "a.$.c" : 1 } } )
+block();
+check( "after pos 2 " );
+
+t.update( { "b" : 2 } , { $inc : { "b.$" : 1 } } )
+block();
+check( "after pos 3 " );
+
+t.update( { "b" : 3} , { $set : { "b.$" : 17 } } )
+block();
+check( "after pos 4 " );
+
+
+printjson( am.rpos.findOne() )
+printjson( as.rpos.findOne() )
+
+//am.getSisterDB( "local" ).getCollection( "oplog.$main" ).find().limit(10).sort( { $natural : -1 } ).forEach( printjson )
+
+t = am.b;
+t.update( { "_id" : "fun"}, { $inc : {"a.b.c.x" : 6743} } , true, false)
+block()
+check( "b 1" );
+
+t.update( { "_id" : "fun"}, { $inc : {"a.b.c.x" : 5} } , true, false)
+block()
+check( "b 2" );
+
+t.update( { "_id" : "fun"}, { $inc : {"a.b.c.x" : 100, "a.b.c.y" : 911} } , true, false)
+block()
+assert.eq( { _id : "fun" , a : { b : { c : { x : 6848 , y : 911 } } } } , as.b.findOne() , "b 3" );
+//printjson( t.findOne() )
+//printjson( as.b.findOne() )
+//am.getSisterDB( "local" ).getCollection( "oplog.$main" ).find().sort( { $natural : -1 } ).limit(3).forEach( printjson )
+check("b 4");
+
+rt.stop();
+*/
diff --git a/jstests/rs/test_framework.js b/jstests/rs/test_framework.js
new file mode 100644
index 0000000..eb6e628
--- /dev/null
+++ b/jstests/rs/test_framework.js
@@ -0,0 +1,30 @@
+// test helpers
+// load("test_framework.js")
+
+DB.prototype.isMaster = function() {
+ return this.runCommand("isMaster");
+}
+DB.prototype.ismaster = function () { return this.isMaster().ismaster; }
+
+function rs_mongod() {
+ /* run mongod for a replica set member. wipes data dir! */
+ var port = __nextPort++;
+ var not_me = (port == 27000 ? port + 1 : port - 1);
+ var f = startMongodEmpty;
+ var dir = "" + port; // e.g., data/db/27000
+ var conn = f.apply(null, [
+ {
+ port: port,
+ dbpath: "/data/db/" + dir,
+ noprealloc: "",
+ smallfiles: "",
+ oplogSize: "2",
+ //nohttpinterface: ""
+ rest: "", // --rest is best for replica set administration
+ replSet: "asdf/" + hostname() + ":" + not_me
+ }
+ ]
+ );
+ conn.name = "localhost:" + port;
+ return conn;
+}
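+
+// Usage sketch (assumes ports 27000/27001 are free and /data/db is writable):
+// load("../../jstests/rs/test_framework.js");
+// var a = rs_mongod(); // first member, port 27000
+// var b = rs_mongod(); // second member, port 27001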
diff --git a/jstests/sharding/addshard1.js b/jstests/sharding/addshard1.js
new file mode 100644
index 0000000..f28feed
--- /dev/null
+++ b/jstests/sharding/addshard1.js
@@ -0,0 +1,56 @@
+s = new ShardingTest( "add_shard1", 1 );
+
+assert.eq( 1, s.config.shards.count(), "initial server count wrong" );
+
+// create a shard and add a database; if the database name is not duplicated the mongod should be
+// accepted as a shard
+conn1 = startMongodTest( 29000 );
+
+db1 = conn1.getDB( "testDB" );
+numObjs = 0;
+for (i=0; i<3; i++){
+ db1.foo.save( { a : i } );
+ numObjs++;
+}
+db1.getLastError()
+
+newShard = "myShard";
+assert( s.admin.runCommand( { addshard: "localhost:29000" , name: newShard } ).ok, "did not accepted non-duplicated shard" );
+
+// a mongod with an existing database name should not be allowed to become a shard
+conn2 = startMongodTest( 29001 );
+db2 = conn2.getDB( "otherDB" );
+db2.foo.save( {a:1} );
+db2.getLastError()
+db3 = conn2.getDB( "testDB" );
+db3.foo.save( {a:1} );
+db3.getLastError()
+
+s.config.databases.find().forEach( printjson )
+rejectedShard = "rejectedShard";
+assert( ! s.admin.runCommand( { addshard: "localhost:29001" , name : rejectedShard } ).ok, "accepted mongod with duplicate db" );
+
+// check that all collections that were local to the mongods are accessible through the mongos
+sdb1 = s.getDB( "testDB" );
+assert.eq( numObjs , sdb1.foo.count() , "wrong count for database that existed before addshard" );
+sdb2 = s.getDB( "otherDB" );
+assert.eq( 0 , sdb2.foo.count() , "database of rejected shard appears through mongos" );
+
+// make sure we can move a DB from the original mongod to a previously existing shard
+assert.eq( s.normalize( s.config.databases.findOne( { _id : "testDB" } ).primary ), newShard , "DB primary is wrong" );
+origShard = s.getNonPrimaries( "testDB" )[0];
+s.adminCommand( { moveprimary : "testDB" , to : origShard } );
+assert.eq( s.normalize( s.config.databases.findOne( { _id : "testDB" } ).primary ), origShard , "DB primary didn't move" );
+assert.eq( numObjs , sdb1.foo.count() , "wrong count after moving datbase that existed before addshard" );
+
+// make sure we can shard the original collections
+sdb1.foo.ensureIndex( { a : 1 } ) // can't shard populated collection without an index
+s.adminCommand( { enablesharding : "testDB" } );
+s.adminCommand( { shardcollection : "testDB.foo" , key: { a : 1 } } );
+s.adminCommand( { split : "testDB.foo", middle: { a : Math.floor(numObjs/2) } } );
+assert.eq( 2 , s.config.chunks.count(), "wrong chunk number after splitting collection that existed before" );
+assert.eq( numObjs , sdb1.foo.count() , "wrong count after splitting collection that existed before" );
+
+stopMongod( 29000 );
+stopMongod( 29001 );
+s.stop();
diff --git a/jstests/sharding/auto1.js b/jstests/sharding/auto1.js
index 92a4ce8..346c43a 100644
--- a/jstests/sharding/auto1.js
+++ b/jstests/sharding/auto1.js
@@ -14,38 +14,50 @@ coll = db.foo;
var i=0;
-for ( ; i<500; i++ ){
+for ( ; i<100; i++ ){
coll.save( { num : i , s : bigString } );
}
-
-s.adminCommand( "connpoolsync" );
+db.getLastError();
primary = s.getServer( "test" ).getDB( "test" );
-assert.eq( 1 , s.config.chunks.count() );
-assert.eq( 500 , primary.foo.count() );
+counts = []
+
+s.printChunks();
+counts.push( s.config.chunks.count() );
+assert.eq( 100 , primary.foo.count() );
print( "datasize: " + tojson( s.getServer( "test" ).getDB( "admin" ).runCommand( { datasize : "test.foo" } ) ) );
-for ( ; i<800; i++ ){
+for ( ; i<200; i++ ){
coll.save( { num : i , s : bigString } );
}
-assert.eq( 1 , s.config.chunks.count() );
+s.printChunks()
+counts.push( s.config.chunks.count() );
-for ( ; i<1500; i++ ){
+for ( ; i<400; i++ ){
coll.save( { num : i , s : bigString } );
}
-assert.eq( 3 , s.config.chunks.count() , "shard didn't split A " );
s.printChunks();
+counts.push( s.config.chunks.count() );
-for ( ; i<3000; i++ ){
+for ( ; i<700; i++ ){
coll.save( { num : i , s : bigString } );
}
+db.getLastError();
-assert.eq( 4 , s.config.chunks.count() , "shard didn't split B " );
s.printChunks();
+counts.push( s.config.chunks.count() );
+
+assert( counts[counts.length-1] > counts[0] , "counts 1 : " + tojson( counts ) )
+sorted = counts.slice(0)
+sorted.sort();
+assert.eq( counts , sorted , "counts 2 : " + tojson( counts ) )
+
+print( counts )
+printjson( db.stats() )
s.stop();
diff --git a/jstests/sharding/auto2.js b/jstests/sharding/auto2.js
index c6ec374..5ac9cd9 100644
--- a/jstests/sharding/auto2.js
+++ b/jstests/sharding/auto2.js
@@ -1,6 +1,6 @@
// auto2.js
-s = new ShardingTest( "auto2" , 2 , 1 , 1 );
+s = new ShardingTest( "auto2" , 2 , 5 , 2 );
s.adminCommand( { enablesharding : "test" } );
s.adminCommand( { shardcollection : "test.foo" , key : { num : 1 } } );
@@ -26,19 +26,116 @@ for ( j=0; j<30; j++ ){
) );
}
+assert.eq( i , j * 100 , "setup" );
s.adminCommand( "connpoolsync" );
+db.getLastError();
print( "done inserting data" );
print( "datasize: " + tojson( s.getServer( "test" ).getDB( "admin" ).runCommand( { datasize : "test.foo" } ) ) );
s.printChunks();
-counta = s._connections[0].getDB( "test" ).foo.count();
-countb = s._connections[1].getDB( "test" ).foo.count();
+function doCountsGlobal(){
+ counta = s._connections[0].getDB( "test" ).foo.count();
+ countb = s._connections[1].getDB( "test" ).foo.count();
+ return counta + countb;
+}
+
+doCountsGlobal()
+
+assert( counta > 0 , "diff1" );
+assert( countb > 0 , "diff2" );
+
+print( "checkpoint B" )
+
+var missing = [];
+
+for ( i=0; i<j*100; i++ ){
+ var x = coll.findOne( { num : i } );
+ if ( ! x ){
+ missing.push( i );
+ print( "can't find: " + i );
+ sleep( 5000 );
+ x = coll.findOne( { num : i } );
+ if ( ! x ){
+ print( "still can't find: " + i );
+
+ for ( var zzz=0; zzz<s._connections.length; zzz++ ){
+ if ( s._connections[zzz].getDB( "test" ).foo.findOne( { num : i } ) ){
+ print( "found on wrong server: " + s._connections[zzz] );
+ }
+ }
+
+ }
+ }
+}
+
-assert.eq( j * 100 , counta + countb , "from each a:" + counta + " b:" + countb + " i:" + i );
+
+s.printChangeLog();
+
+print( "missing: " + tojson( missing ) )
+assert.soon( function(z){ return doCountsGlobal() == j * 100; } , "from each a:" + counta + " b:" + countb + " i:" + i );
+print( "checkpoint B.a" )
+s.printChunks();
assert.eq( j * 100 , coll.find().limit(100000000).itcount() , "itcount A" );
+assert.eq( j * 100 , counta + countb , "from each 2 a:" + counta + " b:" + countb + " i:" + i );
+assert( missing.length == 0 , "missing : " + tojson( missing ) );
+
+print( "checkpoint C" )
assert( Array.unique( s.config.chunks.find().toArray().map( function(z){ return z.shard; } ) ).length == 2 , "should be using both servers" );
+for ( i=0; i<100; i++ ){
+ cursor = coll.find().batchSize(5);
+ cursor.next();
+ cursor = null;
+ gc();
+}
+
+print( "checkpoint D")
+
+// test not-sharded cursors
+db = s.getDB( "test2" );
+t = db.foobar;
+for ( i =0; i<100; i++ )
+ t.save( { _id : i } );
+for ( i=0; i<100; i++ ){
+ t.find().batchSize( 2 ).next();
+ assert.lt( 0 , db.runCommand( "cursorInfo" ).totalOpen , "cursor1" );
+ gc();
+}
+
+for ( i=0; i<100; i++ ){
+ gc();
+}
+assert.eq( 0 , db.runCommand( "cursorInfo" ).totalOpen , "cursor2" );
+
+print( "checkpoint E")
+
+x = db.runCommand( "connPoolStats" );
+for ( host in x.hosts ){
+ var foo = x.hosts[host];
+ assert.lt( 0 , foo.available , "pool: " + host );
+}
+
+print( "checkpoint F")
+
+assert( t.findOne() , "check close 0" );
+
+for ( i=0; i<20; i++ ){
+ temp = new Mongo( db.getMongo().host )
+ temp2 = temp.getDB( "test2" ).foobar;
+ assert.eq( temp._fullNameSpace , t._fullNameSpace , "check close 1" );
+ assert( temp2.findOne() , "check close 2" );
+ temp = null;
+ gc();
+}
+
+print( "checkpoint G")
+
+assert.throws( function(){ s.getDB( "test" ).foo.find().sort( { s : 1 } ).forEach( printjsononeline ) } )
+
+print( "checkpoint H")
+
s.stop();
diff --git a/jstests/sharding/bigMapReduce.js b/jstests/sharding/bigMapReduce.js
new file mode 100644
index 0000000..1cc12f4
--- /dev/null
+++ b/jstests/sharding/bigMapReduce.js
@@ -0,0 +1,17 @@
+s = new ShardingTest( "bigMapReduce" , 2 , 1 , 1 , { chunksize : 1 } );
+
+s.adminCommand( { enablesharding : "test" } )
+s.adminCommand( { shardcollection : "test.foo", key : { "_id" : 1 } } )
+
+db = s.getDB( "test" );
+var str=""
+for (i=0;i<4*1024;i++) { str=str+"a"; }
+for (j=0; j<50; j++) for (i=0; i<512; i++){ db.foo.save({y:str})}
+
+function map() { emit('count', 1); }
+function reduce(key, values) { return Array.sum(values) }
+
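+// 50 * 512 = 25600 docs of roughly 4KB each were inserted above, so (sketch
+// of the expected result) the map/reduce output should be a single key:
+// { _id : "count" , value : 25600 }.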
+out = db.foo.mapReduce(map, reduce)
+printjson(out) // SERVER-1400
+
+s.stop()
diff --git a/jstests/sharding/count1.js b/jstests/sharding/count1.js
index a697162..ed69d1f 100644
--- a/jstests/sharding/count1.js
+++ b/jstests/sharding/count1.js
@@ -1,7 +1,6 @@
// count1.js
-s = new ShardingTest( "count1" , 2 );
-
+s = new ShardingTest( "count1" , 2 , 1 );
db = s.getDB( "test" );
db.bar.save( { n : 1 } )
@@ -15,16 +14,16 @@ s.adminCommand( { enablesharding : "test" } )
s.adminCommand( { shardcollection : "test.foo" , key : { name : 1 } } );
primary = s.getServer( "test" ).getDB( "test" );
-seconday = s.getOther( primary ).getDB( "test" );
+secondary = s.getOther( primary ).getDB( "test" );
assert.eq( 1 , s.config.chunks.count() , "sanity check A" );
-db.foo.save( { name : "eliot" } )
-db.foo.save( { name : "sara" } )
-db.foo.save( { name : "bob" } )
-db.foo.save( { name : "joe" } )
-db.foo.save( { name : "mark" } )
-db.foo.save( { name : "allan" } )
+db.foo.save( { _id : 1 , name : "eliot" } )
+db.foo.save( { _id : 2 , name : "sara" } )
+db.foo.save( { _id : 3 , name : "bob" } )
+db.foo.save( { _id : 4 , name : "joe" } )
+db.foo.save( { _id : 5 , name : "mark" } )
+db.foo.save( { _id : 6 , name : "allan" } )
assert.eq( 6 , db.foo.find().count() , "basic count" );
@@ -35,20 +34,57 @@ s.adminCommand( { split : "test.foo" , find : { name : "joe" } } );
assert.eq( 6 , db.foo.find().count() , "basic count after split " );
assert.eq( 6 , db.foo.find().sort( { name : 1 } ).count() , "basic count after split sorted " );
-s.adminCommand( { movechunk : "test.foo" , find : { name : "joe" } , to : seconday.getMongo().name } );
+s.adminCommand( { movechunk : "test.foo" , find : { name : "joe" } , to : secondary.getMongo().name } );
assert.eq( 3 , primary.foo.find().toArray().length , "primary count" );
-assert.eq( 3 , seconday.foo.find().toArray().length , "secondary count" );
+assert.eq( 3 , secondary.foo.find().toArray().length , "secondary count" );
assert.eq( 3 , primary.foo.find().sort( { name : 1 } ).toArray().length , "primary count sorted" );
-assert.eq( 3 , seconday.foo.find().sort( { name : 1 } ).toArray().length , "secondary count sorted" );
+assert.eq( 3 , secondary.foo.find().sort( { name : 1 } ).toArray().length , "secondary count sorted" );
assert.eq( 6 , db.foo.find().toArray().length , "total count after move" );
assert.eq( 6 , db.foo.find().sort( { name : 1 } ).toArray().length , "total count() sorted" );
assert.eq( 6 , db.foo.find().sort( { name : 1 } ).count() , "total count with count() after move" );
-assert.eq( "allan,bob,eliot,joe,mark,sara" , db.foo.find().sort( { name : 1 } ).toArray().map( function(z){ return z.name; } ) , "sort 1" );
-assert.eq( "sara,mark,joe,eliot,bob,allan" , db.foo.find().sort( { name : -1 } ).toArray().map( function(z){ return z.name; } ) , "sort 2" );
+function nameString( c ){
+ var s = "";
+ while ( c.hasNext() ){
+ var o = c.next();
+ if ( s.length > 0 )
+ s += ",";
+ s += o.name;
+ }
+ return s;
+}
+
+assert.eq( "allan,bob,eliot,joe,mark,sara" , nameString( db.foo.find().sort( { name : 1 } ) ) , "sort 1" );
+assert.eq( "sara,mark,joe,eliot,bob,allan" , nameString( db.foo.find().sort( { name : -1 } ) ) , "sort 2" );
+
+assert.eq( 2 , db.foo.find().limit(2).itcount() , "LS1" )
+assert.eq( 2 , db.foo.find().skip(2).limit(2).itcount() , "LS2" )
+assert.eq( 1 , db.foo.find().skip(5).limit(2).itcount() , "LS3" )
+assert.eq( 6 , db.foo.find().limit(2).count() , "LSC1" )
+assert.eq( 2 , db.foo.find().limit(2).size() , "LSC2" )
+assert.eq( 2 , db.foo.find().skip(2).limit(2).size() , "LSC3" )
+assert.eq( 1 , db.foo.find().skip(5).limit(2).size() , "LSC4" )
+
+assert.eq( "allan,bob" , nameString( db.foo.find().sort( { name : 1 } ).limit(2) ) , "LSD1" )
+assert.eq( "bob,eliot" , nameString( db.foo.find().sort( { name : 1 } ).skip(1).limit(2) ) , "LSD2" )
+assert.eq( "joe,mark" , nameString( db.foo.find().sort( { name : 1 } ).skip(3).limit(2) ) , "LSD3" )
+
+assert.eq( "eliot,sara" , nameString( db.foo.find().sort( { _id : 1 } ).limit(2) ) , "LSE1" )
+assert.eq( "sara,bob" , nameString( db.foo.find().sort( { _id : 1 } ).skip(1).limit(2) ) , "LSE2" )
+assert.eq( "joe,mark" , nameString( db.foo.find().sort( { _id : 1 } ).skip(3).limit(2) ) , "LSE3" )
+
+for ( i=0; i<10; i++ ){
+ db.foo.save( { _id : 7 + i , name : "zzz" + i } )
+}
+
+assert.eq( 10 , db.foo.find( { name : { $gt : "z" } } ).itcount() , "LSF1" )
+assert.eq( 10 , db.foo.find( { name : { $gt : "z" } } ).sort( { _id : 1 } ).itcount() , "LSF2" )
+assert.eq( 5 , db.foo.find( { name : { $gt : "z" } } ).sort( { _id : 1 } ).skip(5).itcount() , "LSF3" )
+sleep( 5000 )
+assert.eq( 3 , db.foo.find( { name : { $gt : "z" } } ).sort( { _id : 1 } ).skip(5).limit(3).itcount() , "LSF4" )
s.stop();
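
The LSC assertions above hinge on a cursor-API distinction that is easy to miss: count() ignores skip/limit while size() applies them. A sketch, reusing the six-document test.foo from this test:

var c = db.foo.find().skip(2).limit(2);
print( c.count() ); // 6 -- total matches across all shards, skip/limit ignored
print( c.size() );  // 2 -- documents actually returned after skip/limit
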
diff --git a/jstests/sharding/count2.js b/jstests/sharding/count2.js
new file mode 100644
index 0000000..2d9507e
--- /dev/null
+++ b/jstests/sharding/count2.js
@@ -0,0 +1,43 @@
+// count2.js
+
+s1 = new ShardingTest( "count2" , 2 , 1 , 2 );
+s2 = s1._mongos[1];
+
+s1.adminCommand( { enablesharding: "test" } );
+s1.adminCommand( { shardcollection: "test.foo" , key : { name : 1 } } );
+
+db1 = s1.getDB( "test" ).foo;
+db2 = s2.getDB( "test" ).foo;
+
+assert.eq( 1, s1.config.chunks.count(), "sanity check A");
+
+db1.save( { name : "aaa" } )
+db1.save( { name : "bbb" } )
+db1.save( { name : "ccc" } )
+db1.save( { name : "ddd" } )
+db1.save( { name : "eee" } )
+db1.save( { name : "fff" } )
+
+s1.adminCommand( { split : "test.foo" , middle : { name : "ddd" } } );
+
+assert.eq( 3, db1.count( { name : { $gte: "aaa" , $lt: "ddd" } } ) , "initial count mongos1" );
+assert.eq( 3, db2.count( { name : { $gte: "aaa" , $lt: "ddd" } } ) , "initial count mongos2" );
+
+s1.printChunks( "test.foo" )
+
+s1.adminCommand( { movechunk : "test.foo" , find : { name : "aaa" } , to : s1.getOther( s1.getServer( "test" ) ).name } );
+
+assert.eq( 3, db1.count( { name : { $gte: "aaa" , $lt: "ddd" } } ) , "post count mongos1" );
+
+// The second mongos still thinks its shard mapping is valid and accepts a count
+print( "before sleep: " + Date() )
+sleep( 2000 )
+print( "after sleep: " + Date() )
+s1.printChunks( "test.foo" )
+assert.eq( 3, db2.find( { name : { $gte: "aaa" , $lt: "ddd" } } ).count() , "post count mongos2" );
+
+db2.findOne();
+
+assert.eq( 3, db2.count( { name : { $gte: "aaa" , $lt: "ddd" } } ) );
+
+s1.stop();
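
The sleep-then-count dance above exists because each mongos caches the chunk map independently, so the second mongos keeps serving from a stale mapping for a while after the migration. A hedged sketch of the refresh trigger, reusing db2 from the test:

// db2's mongos may still hold the pre-migration chunk map; a full query
// round-trip (a findOne is enough) lets a shard reject the stale version,
// after which the mongos reloads its mapping
db2.findOne();
assert.eq( 3 , db2.count( { name : { $gte : "aaa" , $lt : "ddd" } } ) );
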
diff --git a/jstests/sharding/cursor1.js b/jstests/sharding/cursor1.js
new file mode 100644
index 0000000..2a30936
--- /dev/null
+++ b/jstests/sharding/cursor1.js
@@ -0,0 +1,60 @@
+// cursor1.js
+// checks that cursors survive a chunk's move
+
+s = new ShardingTest( "sharding_cursor1" , 2 , 2 )
+
+// take the balancer out of the equation
+s.config.settings.update( { _id: "balancer" }, { $set : { stopped: true } } , true );
+s.config.settings.find().forEach( printjson )
+
+// create a sharded 'test.foo', for the moment with just one chunk
+s.adminCommand( { enablesharding: "test" } );
+s.adminCommand( { shardcollection: "test.foo", key: { _id: 1 } } )
+
+db = s.getDB( "test" );
+primary = s.getServer( "test" ).getDB( "test" );
+secondary = s.getOther( primary ).getDB( "test" );
+
+numObjs = 10;
+for (i=0; i < numObjs; i++){
+ db.foo.insert({_id: i});
+}
+db.getLastError();
+assert.eq( 1, s.config.chunks.count() , "test requires collection to have one chunk initially" );
+
+// we'll split the collection in two and move the second chunk while three cursors are open
+// cursor1 still has more data in the first chunk, the one that didn't move
+// cursor2 buffered the last obj of the first chunk
+// cursor3 buffered data from the second chunk, the one that will be moved
+var cursor1 = db.foo.find().batchSize( 3 );
+assert.eq( 3 , cursor1.objsLeftInBatch() );
+var cursor2 = db.foo.find().batchSize( 5 );
+assert.eq( 5 , cursor2.objsLeftInBatch() );
+var cursor3 = db.foo.find().batchSize( 7 );
+assert.eq( 7 , cursor3.objsLeftInBatch() );
+
+s.adminCommand( { split: "test.foo" , middle : { _id : 5 } } );
+s.adminCommand( { movechunk : "test.foo" , find : { _id : 5 } , to : secondary.getMongo().name } );
+assert.eq( 2, s.config.chunks.count() );
+
+// the cursors should not have been affected
+assert.eq( numObjs , cursor1.itcount() , "c1" );
+assert.eq( numObjs , cursor2.itcount() , "c2" );
+assert.eq( numObjs , cursor3.itcount() , "c3" );
+
+// test timeout
+gc(); gc();
+cur = db.foo.find().batchSize( 2 )
+assert( cur.next() , "T1" )
+assert( cur.next() , "T2" );
+before = db.runCommand( { "cursorInfo" : 1 , "setTimeout" : 10000 } ) // 10 seconds
+printjson( before )
+sleep( 6000 )
+assert( cur.next() , "T3" )
+assert( cur.next() , "T4" );
+sleep( 22000 )
+assert.throws( function(){ cur.next(); } , "T5" )
+after = db.runCommand( { "cursorInfo" : 1 , "setTimeout" : 10000 } ) // 10 seconds
+gc(); gc()
+
+s.stop()
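
The batch arithmetic above relies on the shell's cursor buffering; a minimal sketch, assuming db.foo holds at least three documents:

var cur = db.foo.find().batchSize( 3 ); // request 3 docs per server batch
print( cur.objsLeftInBatch() );         // docs already buffered client-side
cur.next();                             // served from the buffer; a getMore is
                                        // only issued once the buffer drains
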
diff --git a/jstests/sharding/diffservers1.js b/jstests/sharding/diffservers1.js
index 6497bc0..31fd75a 100644
--- a/jstests/sharding/diffservers1.js
+++ b/jstests/sharding/diffservers1.js
@@ -14,7 +14,7 @@ assert( 3 , test1.count() );
assert( ! s.admin.runCommand( { addshard: "sdd$%" } ).ok , "bad hostname" );
assert( ! s.admin.runCommand( { addshard: "127.0.0.1:43415" } ).ok , "host not up" );
-assert( ! s.admin.runCommand( { addshard: "127.0.0.1:43415" , allowLocal : true } ).ok , "host not up" );
+assert( ! s.admin.runCommand( { addshard: "10.0.0.1:43415" } ).ok , "allowed shard in IP when config is localhost" );
s.stop();
diff --git a/jstests/sharding/error1.js b/jstests/sharding/error1.js
index b4db9c3..e1aae06 100644
--- a/jstests/sharding/error1.js
+++ b/jstests/sharding/error1.js
@@ -18,19 +18,21 @@ assert( db.getLastError() , "gle22" );
s.adminCommand( { shardcollection : "test.foo2" , key : { num : 1 } } );
-db.foo2.insert( { _id : 1 , num : 5 } );
-db.foo2.insert( { _id : 2 , num : 10 } );
-db.foo2.insert( { _id : 3 , num : 15 } );
-db.foo2.insert( { _id : 4 , num : 20 } );
+db.foo2.save( { _id : 1 , num : 5 } );
+db.foo2.save( { _id : 2 , num : 10 } );
+db.foo2.save( { _id : 3 , num : 15 } );
+db.foo2.save( { _id : 4 , num : 20 } );
s.adminCommand( { split : "test.foo2" , middle : { num : 10 } } );
s.adminCommand( { movechunk : "test.foo2" , find : { num : 20 } , to : s.getOther( s.getServer( "test" ) ).name } );
+print( "a: " + a.foo2.count() );
+print( "b: " + b.foo2.count() );
assert( a.foo2.count() > 0 && a.foo2.count() < 4 , "se1" );
assert( b.foo2.count() > 0 && b.foo2.count() < 4 , "se2" );
assert.eq( 4 , db.foo2.count() , "se3" );
-db.foo2.insert( { _id : 5 , num : 25 } );
+db.foo2.save( { _id : 5 , num : 25 } );
assert( ! db.getLastError() , "se3.5" );
s.sync();
assert.eq( 5 , db.foo2.count() , "se4" );
@@ -43,5 +45,17 @@ assert( db.getLastError() , "se6" );
assert.eq( 5 , db.foo2.count() , "se5" );
+
+// assert in mongos
+s.adminCommand( { shardcollection : "test.foo3" , key : { num : 1 } } );
+assert.isnull(db.getLastError() , "gle C1" );
+
+db.foo3.insert({}); // this fails with a "no shard key" error
+assert(db.getLastError() , "gle C2a" );
+assert(db.getLastError() , "gle C2b" );
+
+db.foo3.insert({num:1});
+assert.isnull(db.getLastError() , "gle C3a" );
+
// ----
s.stop();
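
The gle checks above follow the safe-write pattern of this shell era: writes are fire-and-forget, and getLastError() reports on the last operation sent over the current connection. A sketch:

db.foo2.save( { _id : 6 , num : 30 } );
var err = db.getLastError(); // null on success, an error message on failure
assert.isnull( err , "save should have succeeded" );
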
diff --git a/jstests/sharding/features1.js b/jstests/sharding/features1.js
index d2f692a..05b8b8c 100644
--- a/jstests/sharding/features1.js
+++ b/jstests/sharding/features1.js
@@ -50,10 +50,15 @@ s.sync();
assert.eq( 4 , a.foo.getIndexKeys().length , "a index 3" );
assert.eq( 4 , b.foo.getIndexKeys().length , "b index 3" );
+db.foo.ensureIndex( { num : 1 , bar : 1 } , true );
+s.sync();
+assert.eq( 5 , b.foo.getIndexKeys().length , "c index 3" );
+
// ---- can't shard thing with unique indexes
db.foo2.ensureIndex( { a : 1 } );
s.sync();
+printjson( db.system.indexes.find( { ns : "test.foo2" } ).toArray() );
assert( s.admin.runCommand( { shardcollection : "test.foo2" , key : { num : 1 } } ).ok , "shard with index" );
db.foo3.ensureIndex( { a : 1 } , true );
@@ -61,6 +66,12 @@ s.sync();
printjson( db.system.indexes.find( { ns : "test.foo3" } ).toArray() );
assert( ! s.admin.runCommand( { shardcollection : "test.foo3" , key : { num : 1 } } ).ok , "shard with unique index" );
+db.foo7.ensureIndex( { num : 1 , a : 1 } , true );
+s.sync();
+printjson( db.system.indexes.find( { ns : "test.foo7" } ).toArray() );
+assert( s.admin.runCommand( { shardcollection : "test.foo7" , key : { num : 1 } } ).ok , "shard with ok unique index" );
+
+
// ----- eval -----
db.foo2.save( { num : 5 , a : 7 } );
@@ -83,6 +94,7 @@ s.adminCommand( { split : "test.foo4" , middle : { num : 10 } } );
s.adminCommand( { movechunk : "test.foo4" , find : { num : 20 } , to : s.getOther( s.getServer( "test" ) ).name } );
db.foo4.save( { num : 5 } );
db.foo4.save( { num : 15 } );
+db.getLastError();
s.sync();
assert.eq( 1 , a.foo4.count() , "ua1" );
assert.eq( 1 , b.foo4.count() , "ub1" );
@@ -120,13 +132,15 @@ assert( ! s.admin.runCommand( { shardcollection : "test.foo5" , key : { num : 1
db.foo6.save( { a : 1 } );
db.foo6.save( { a : 3 } );
db.foo6.save( { a : 3 } );
+db.foo6.ensureIndex( { a : 1 } );
s.sync();
+printjson( db.system.indexes.find( { ns : "test.foo6" } ).toArray() );
assert.eq( 2 , db.foo6.group( { key : { a : 1 } , initial : { count : 0 } ,
reduce : function(z,prev){ prev.count++; } } ).length );
assert.eq( 3 , db.foo6.find().count() );
-assert( s.admin.runCommand( { shardcollection : "test.foo6" , key : { a : 2 } } ).ok );
+assert( s.admin.runCommand( { shardcollection : "test.foo6" , key : { a : 1 } } ).ok );
assert.eq( 3 , db.foo6.find().count() );
s.adminCommand( { split : "test.foo6" , middle : { a : 2 } } );
@@ -135,5 +149,16 @@ s.adminCommand( { movechunk : "test.foo6" , find : { a : 3 } , to : s.getOther(
assert.throws( function(){ db.foo6.group( { key : { a : 1 } , initial : { count : 0 } , reduce : function(z,prev){ prev.count++; } } ); } );;
+// ---- can't shard non-empty collection without index -----
+
+db.foo8.save( { a : 1 } );
+assert( ! s.admin.runCommand( { shardcollection : "test.foo8" , key : { a : 1 } } ).ok , "non-empty collection" );
+
+// --- listDatabases ---
+
+r = db.getMongo().getDBs()
+assert.eq( 4 , r.databases.length , "listDatabases 1 : " + tojson( r ) )
+assert.lt( 10000 , r.totalSize , "listDatabases 2 : " + tojson( r ) );
+
s.stop()
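
The foo3/foo7 pair above demonstrates the rule for unique indexes on sharded collections: uniqueness can only be enforced per shard, so a unique index is accepted only when the shard key is a prefix of it. A sketch against a hypothetical shard key { num : 1 }:

db.ok.ensureIndex( { num : 1 , a : 1 } , true );  // unique, shard key is a prefix: shardable
db.bad.ensureIndex( { a : 1 } , true );           // unique on a non-key field: shardcollection refuses
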
diff --git a/jstests/sharding/features2.js b/jstests/sharding/features2.js
index 47fedc8..dfb2883 100644
--- a/jstests/sharding/features2.js
+++ b/jstests/sharding/features2.js
@@ -13,6 +13,7 @@ db = s.getDB( "test" );
db.foo.save( { x : 1 } );
db.foo.save( { x : 2 } );
db.foo.save( { x : 3 } );
+db.foo.ensureIndex( { x : 1 } );
assert.eq( "1,2,3" , db.foo.distinct( "x" ) , "distinct 1" );
assert( a.foo.distinct("x").length == 3 || b.foo.distinct("x").length == 3 , "distinct 2" );
@@ -51,25 +52,27 @@ assert.eq( 0 , db.foo.count() , "D7" );
// --- _id key ---
-db.foo2.insert( { _id : new ObjectId() } );
-db.foo2.insert( { _id : new ObjectId() } );
-db.foo2.insert( { _id : new ObjectId() } );
+db.foo2.save( { _id : new ObjectId() } );
+db.foo2.save( { _id : new ObjectId() } );
+db.foo2.save( { _id : new ObjectId() } );
+db.getLastError();
assert.eq( 1 , s.onNumShards( "foo2" ) , "F1" );
+printjson( db.system.indexes.find( { ns : "test.foo2" } ).toArray() );
s.adminCommand( { shardcollection : "test.foo2" , key : { _id : 1 } } );
assert.eq( 3 , db.foo2.count() , "F2" )
db.foo2.insert( {} );
assert.eq( 4 , db.foo2.count() , "F3" )
-
// --- map/reduce
db.mr.save( { x : 1 , tags : [ "a" , "b" ] } );
db.mr.save( { x : 2 , tags : [ "b" , "c" ] } );
db.mr.save( { x : 3 , tags : [ "c" , "a" ] } );
db.mr.save( { x : 4 , tags : [ "b" , "c" ] } );
+db.mr.ensureIndex( { x : 1 } );
m = function(){
this.tags.forEach(
@@ -88,8 +91,12 @@ r = function( key , values ){
};
doMR = function( n ){
+ print(n);
+
var res = db.mr.mapReduce( m , r );
printjson( res );
+ assert.eq( new NumberLong(4) , res.counts.input , "MR T0 " + n );
+
var x = db[res.result];
assert.eq( 3 , x.find().count() , "MR T1 " + n );
@@ -111,4 +118,42 @@ assert.eq( 2 , s.onNumShards( "mr" ) , "E1" );
doMR( "after" );
+s.adminCommand({split:'test.mr' , middle:{x:3}} );
+s.adminCommand({split:'test.mr' , middle:{x:4}} );
+s.adminCommand({movechunk:'test.mr', find:{x:3}, to: s.getServer('test').name } );
+
+doMR( "after extra split" );
+
+cmd = { mapreduce : "mr" , map : "emit( " , reduce : "fooz + " };
+
+x = db.runCommand( cmd );
+y = s._connections[0].getDB( "test" ).runCommand( cmd );
+
+printjson( x )
+printjson( y )
+
+// count
+
+db.countaa.save({"regex" : /foo/i})
+db.countaa.save({"regex" : /foo/i})
+db.countaa.save({"regex" : /foo/i})
+assert.eq( 3 , db.countaa.count() , "counta1" );
+assert.eq( 3 , db.countaa.find().itcount() , "counta1" );
+
+x = null; y = null;
+try {
+ x = db.runCommand( "forceerror" )
+}
+catch ( e ){
+ x = e;
+}
+try {
+ y = s._connections[0].getDB( "test" ).runCommand( "forceerror" );
+}
+catch ( e ){
+ y = e;
+}
+
+assert.eq( x , y , "assert format" )
+
s.stop();
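
doMR's input-count assertion above leans on the structure of mapReduce results; a sketch of the tag-counting shape this test exercises (not the test's exact functions):

m = function(){ this.tags.forEach( function(t){ emit( t , 1 ); } ); };
r = function( key , values ){ return Array.sum( values ); };
var res = db.mr.mapReduce( m , r );
// res.counts.input is the number of documents mapped, summed across shards
assert.eq( new NumberLong(4) , res.counts.input );
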
diff --git a/jstests/sharding/features3.js b/jstests/sharding/features3.js
new file mode 100644
index 0000000..4ab75ee
--- /dev/null
+++ b/jstests/sharding/features3.js
@@ -0,0 +1,86 @@
+
+s = new ShardingTest( "features3" , 2 , 1 , 1 );
+s.adminCommand( { enablesharding : "test" } );
+
+a = s._connections[0].getDB( "test" );
+b = s._connections[1].getDB( "test" );
+
+db = s.getDB( "test" );
+
+// ---------- load some data -----
+
+s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
+N = 10000;
+s.adminCommand( { split : "test.foo" , middle : { _id : N/2 } } )
+s.adminCommand( { moveChunk : "test.foo", find : { _id : 3 } ,to : s.getNonPrimaries( "test" )[0] } )
+
+for ( i=0; i<N; i++ )
+ db.foo.insert( { _id : i } )
+db.getLastError();
+x = db.foo.stats();
+assert.eq( N , x.count , "total count" )
+assert.eq( N / 2 , x.shards.shard0000.count , "count on shard0000" )
+assert.eq( N / 2 , x.shards.shard0001.count , "count on shard0001" )
+
+start = new Date()
+
+join = startParallelShell( "db.foo.find( function(){ x = \"\"; for ( i=0; i<5000; i++ ){ x+=i; } return true; } ).itcount()" )
+
+function getMine(){
+ var inprog = db.currentOp().inprog;
+ var mine = []
+ for ( var x=0; x<inprog.length; x++ ){
+ if ( inprog[x].query && inprog[x].query.$where ){
+ mine.push( inprog[x] )
+ }
+ }
+ return mine;
+}
+
+state = 0; // 0 = not found, 1 = killed,
+killTime = null;
+
+for ( i=0; i<100000; i++ ){
+ var mine = getMine();
+ if ( state == 0 ){
+ if ( mine.length == 0 ){
+ sleep(1);
+ continue;
+ }
+ state = 1;
+ mine.forEach( function(z){ printjson( db.getSisterDB( "admin" ).killOp( z.opid ) ); } )
+ killTime = new Date()
+ }
+ else if ( state == 1 ){
+ if ( mine.length == 0 ){
+ state = 2;
+ break;
+ }
+ continue;
+ }
+}
+
+killTime = (new Date()).getTime() - killTime.getTime()
+print( "killTime: " + killTime );
+
+assert.eq( 2 , state , "failed killing" );
+assert.gt( 3000 , killTime , "took too long to kill" )
+
+join()
+
+end = new Date()
+
+print( "elapsed: " + ( end.getTime() - start.getTime() ) );
+
+
+x = db.runCommand( "fsync" )
+assert( ! x.ok , "fsync not on admin should fail : " + tojson( x ) );
+assert( x.errmsg.indexOf( "access denied" ) >= 0 , "fsync not on admin should fail : " + tojson( x ) )
+
+x = db._adminCommand( "fsync" )
+assert( x.ok == 1 && x.numFiles > 0 , "fsync failed : " + tojson( x ) )
+
+x = db._adminCommand( { "fsync" :1, lock:true } )
+assert( ! x.ok , "lock should fail: " + tojson( x ) )
+
+s.stop()
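
The kill loop above, in miniature: currentOp() lists in-progress operations, and any op whose query carries a $where clause is the one this test spawned, killable by opid:

db.currentOp().inprog.forEach( function( op ){
    if ( op.query && op.query.$where )
        printjson( db.getSisterDB( "admin" ).killOp( op.opid ) );
} );
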
diff --git a/jstests/sharding/findandmodify1.js b/jstests/sharding/findandmodify1.js
index 774701f..437ec81 100644
--- a/jstests/sharding/findandmodify1.js
+++ b/jstests/sharding/findandmodify1.js
@@ -1,53 +1,51 @@
-s = new ShardingTest( "find_and_modify_sharded" , 2 );
+s = new ShardingTest( "find_and_modify_sharded" , 2 , 2);
s.adminCommand( { enablesharding : "test" } );
db = s.getDB( "test" );
primary = s.getServer( "test" ).getDB( "test" );
-seconday = s.getOther( primary ).getDB( "test" );
+secondary = s.getOther( primary ).getDB( "test" );
numObjs = 20;
s.adminCommand( { shardcollection : "test.stuff" , key : {_id:1} } );
+// pre-split the collection to avoid interference from the balancer
+s.adminCommand( { split: "test.stuff" , middle : { _id : numObjs/2 } } );
+s.adminCommand( { movechunk : "test.stuff" , find : { _id : numObjs/2 } , to : secondary.getMongo().name } ) ;
+
for (var i=0; i < numObjs; i++){
db.stuff.insert({_id: i});
}
+db.getLastError()
-for (var i=0; i < numObjs; i+=2){
+// put two docs in each chunk (avoid splitting at 0, since there are no docs less than 0)
+for (var i=2; i < numObjs; i+=2){
+ if (i == numObjs/2)
+ continue;
s.adminCommand( { split: "test.stuff" , middle : {_id: i} } );
}
-for (var i=0; i < numObjs; i+=4){
- s.adminCommand( { movechunk : "test.stuff" , find : {_id: i} , to : seconday.getMongo().name } );
-}
-
-//sorted update
-for (var i=0; i < numObjs; i++){
- assert.eq(db.stuff.count({a:1}), i, "1 A");
-
- var out = db.stuff.findAndModify({query: {a:null}, update: {$set: {a:1}}, sort: {_id:1}});
-
- assert.eq(db.stuff.count({a:1}), i+1, "1 B");
- assert.eq(db.stuff.findOne({_id:i}).a, 1, "1 C");
- assert.eq(out._id, i, "1 D");
-}
+s.printChunks();
+assert.eq( numObjs/2, s.config.chunks.count(), "split failed" );
+assert.eq( numObjs/4, s.config.chunks.count({ shard: "shard0000" }) );
+assert.eq( numObjs/4, s.config.chunks.count({ shard: "shard0001" }) );
-// unsorted update
+// update
for (var i=0; i < numObjs; i++){
assert.eq(db.stuff.count({b:1}), i, "2 A");
- var out = db.stuff.findAndModify({query: {b:null}, update: {$set: {b:1}}});
+ var out = db.stuff.findAndModify({query: {_id:i, b:null}, update: {$set: {b:1}}});
+ assert.eq(out._id, i, "2 E");
assert.eq(db.stuff.count({b:1}), i+1, "2 B");
- assert.eq(db.stuff.findOne({_id:out._id}).a, 1, "2 C");
}
-//sorted remove (no query)
+// remove
for (var i=0; i < numObjs; i++){
assert.eq(db.stuff.count(), numObjs - i, "3 A");
assert.eq(db.stuff.count({_id: i}), 1, "3 B");
- var out = db.stuff.findAndModify({remove: true, sort: {_id:1}});
+ var out = db.stuff.findAndModify({remove: true, query: {_id:i}});
assert.eq(db.stuff.count(), numObjs - i - 1, "3 C");
assert.eq(db.stuff.count({_id: i}), 0, "3 D");
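
The rewrite above reflects a sharding constraint: findAndModify has to be routed to a single shard, so the query must pin the shard key (_id here) rather than rely on a sort. A sketch:

// the shard key in the query routes the command to exactly one shard
var out = db.stuff.findAndModify( { query : { _id : 5 } , update : { $set : { b : 1 } } } );
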
diff --git a/jstests/sharding/key_many.js b/jstests/sharding/key_many.js
index d1644ac..1e0ba9d 100644
--- a/jstests/sharding/key_many.js
+++ b/jstests/sharding/key_many.js
@@ -1,14 +1,18 @@
// key_many.js
// values have to be sorted
+// you must have exactly 6 values in each array
types = [
{ name : "string" , values : [ "allan" , "bob" , "eliot" , "joe" , "mark" , "sara" ] , keyfield: "k" } ,
{ name : "double" , values : [ 1.2 , 3.5 , 4.5 , 4.6 , 6.7 , 9.9 ] , keyfield : "a" } ,
{ name : "date" , values : [ new Date( 1000000 ) , new Date( 2000000 ) , new Date( 3000000 ) , new Date( 4000000 ) , new Date( 5000000 ) , new Date( 6000000 ) ] , keyfield : "a" } ,
{ name : "string_id" , values : [ "allan" , "bob" , "eliot" , "joe" , "mark" , "sara" ] , keyfield : "_id" },
- { name : "embedded" , values : [ "allan" , "bob" , "eliot" , "joe" , "mark" , "sara" ] , keyfield : "a.b" } ,
+ { name : "embedded 1" , values : [ "allan" , "bob" , "eliot" , "joe" , "mark" , "sara" ] , keyfield : "a.b" } ,
{ name : "embedded 2" , values : [ "allan" , "bob" , "eliot" , "joe" , "mark" , "sara" ] , keyfield : "a.b.c" } ,
{ name : "object" , values : [ {a:1, b:1.2}, {a:1, b:3.5}, {a:1, b:4.5}, {a:2, b:1.2}, {a:2, b:3.5}, {a:2, b:4.5} ] , keyfield : "o" } ,
+ { name : "compound" , values : [ {a:1, b:1.2}, {a:1, b:3.5}, {a:1, b:4.5}, {a:2, b:1.2}, {a:2, b:3.5}, {a:2, b:4.5} ] , keyfield : "o" , compound : true } ,
+ { name : "oid_id" , values : [ ObjectId() , ObjectId() , ObjectId() , ObjectId() , ObjectId() , ObjectId() ] , keyfield : "_id" } ,
+ { name : "oid_other" , values : [ ObjectId() , ObjectId() , ObjectId() , ObjectId() , ObjectId() , ObjectId() ] , keyfield : "o" } ,
]
s = new ShardingTest( "key_many" , 2 );
@@ -20,7 +24,18 @@ seconday = s.getOther( primary ).getDB( "test" );
function makeObjectDotted( v ){
var o = {};
- o[curT.keyfield] = v;
+ if (curT.compound){
+ var prefix = curT.keyfield + '.';
+ if (typeof(v) == 'object'){
+ for (key in v)
+ o[prefix + key] = v[key];
+ } else {
+ for (key in curT.values[0])
+ o[prefix + key] = v;
+ }
+ } else {
+ o[curT.keyfield] = v;
+ }
return o;
}
@@ -39,6 +54,15 @@ function makeObject( v ){
return o;
}
+function makeInQuery(){
+ if (curT.compound){
+ // cheating a bit...
+ return {'o.a': {$in: [1,2]}};
+ } else {
+ return makeObjectDotted({$in: curT.values});
+ }
+}
+
function getKey( o ){
var keys = curT.keyfield.split('.');
for(var i=0; i<keys.length; i++){
@@ -85,7 +109,20 @@ for ( var i=0; i<types.length; i++ ){
assert.eq( 6 , c.find().sort( makeObjectDotted( 1 ) ).count() , curT.name + " total count with count()" );
+ assert.eq( 2 , c.find({$or:[makeObjectDotted(curT.values[2]), makeObjectDotted(curT.values[4])]}).count() , curT.name + " $or count()" );
+ assert.eq( 2 , c.find({$or:[makeObjectDotted(curT.values[2]), makeObjectDotted(curT.values[4])]}).itcount() , curT.name + " $or itcount()" );
+ assert.eq( 4 , c.find({$nor:[makeObjectDotted(curT.values[2]), makeObjectDotted(curT.values[4])]}).count() , curT.name + " $nor count()" );
+ assert.eq( 4 , c.find({$nor:[makeObjectDotted(curT.values[2]), makeObjectDotted(curT.values[4])]}).itcount() , curT.name + " $nor itcount()" );
+
+ var stats = c.stats();
+ printjson( stats )
+ assert.eq( 6 , stats.count , curT.name + " total count with stats()" );
+ var count = 0;
+ for (shard in stats.shards) count += stats.shards[shard].count;
+ assert.eq( 6 , count , curT.name + " total count with stats() sum" );
+
assert.eq( curT.values , c.find().sort( makeObjectDotted( 1 ) ).toArray().map( getKey ) , curT.name + " sort 1" );
+ assert.eq( curT.values , c.find(makeInQuery()).sort( makeObjectDotted( 1 ) ).toArray().map( getKey ) , curT.name + " sort 1 - $in" );
assert.eq( curT.values.reverse() , c.find().sort( makeObjectDotted( -1 ) ).toArray().map( getKey ) , curT.name + " sort 2" );
diff --git a/jstests/sharding/movePrimary1.js b/jstests/sharding/movePrimary1.js
index 20dc6c1..952748e 100644
--- a/jstests/sharding/movePrimary1.js
+++ b/jstests/sharding/movePrimary1.js
@@ -19,13 +19,26 @@ to = s.getOther( from );
assert.eq( 3 , from.getDB( "test1" ).foo.count() , "from doesn't have data before move" );
assert.eq( 0 , to.getDB( "test1" ).foo.count() , "to has data before move" );
-assert.eq( s.config.databases.findOne( { name : "test1" } ).primary , from.name , "not in db correctly to start" );
+assert.eq( s.normalize( s.config.databases.findOne( { _id : "test1" } ).primary ) ,
+ s.normalize( from.name ) , "not in db correctly to start" );
+s.printShardingStatus();
+oldShardName = s.config.databases.findOne( {_id: "test1"} ).primary;
s.admin.runCommand( { moveprimary : "test1" , to : to.name } );
-assert.eq( s.config.databases.findOne( { name : "test1" } ).primary , to.name , "to in config db didn't change" );
-
+s.printShardingStatus();
+assert.eq( s.normalize( s.config.databases.findOne( { _id : "test1" } ).primary ),
+ s.normalize( to.name ) , "to in config db didn't change after first move" );
assert.eq( 0 , from.getDB( "test1" ).foo.count() , "from still has data after move" );
assert.eq( 3 , to.getDB( "test1" ).foo.count() , "to doesn't have data after move" );
+// move back, now using shard name instead of server address
+s.admin.runCommand( { moveprimary : "test1" , to : oldShardName } );
+s.printShardingStatus();
+assert.eq( s.normalize( s.config.databases.findOne( { _id : "test1" } ).primary ),
+ oldShardName , "to in config db didn't change after second move" );
+
+assert.eq( 3 , from.getDB( "test1" ).foo.count() , "from doesn't have data after move back" );
+assert.eq( 0 , to.getDB( "test1" ).foo.count() , "to has data after move back" );
+
s.stop();
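
As the second move shows, moveprimary accepts either a server address or a shard name as its target; a sketch with hypothetical values:

s.admin.runCommand( { moveprimary : "test1" , to : "localhost:30001" } ); // host:port (hypothetical)
s.admin.runCommand( { moveprimary : "test1" , to : "shard0000" } );       // shard name
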
diff --git a/jstests/sharding/moveshard1.js b/jstests/sharding/moveshard1.js
deleted file mode 100644
index 9220983..0000000
--- a/jstests/sharding/moveshard1.js
+++ /dev/null
@@ -1,39 +0,0 @@
-// movechunk1.js
-
-s = new ShardingTest( "movechunk1" , 2 );
-
-l = s._connections[0];
-r = s._connections[1];
-
-ldb = l.getDB( "foo" );
-rdb = r.getDB( "foo" );
-
-ldb.things.save( { a : 1 } )
-ldb.things.save( { a : 2 } )
-ldb.things.save( { a : 3 } )
-
-assert.eq( ldb.things.count() , 3 );
-assert.eq( rdb.things.count() , 0 );
-
-startResult = l.getDB( "admin" ).runCommand( { "movechunk.start" : "foo.things" ,
- "to" : s._connections[1].name ,
- "from" : s._connections[0].name ,
- filter : { a : { $gt : 2 } }
- } );
-print( "movechunk.start: " + tojson( startResult ) );
-assert( startResult.ok == 1 , "start failed!" );
-
-finishResult = l.getDB( "admin" ).runCommand( { "movechunk.finish" : "foo.things" ,
- finishToken : startResult.finishToken ,
- to : s._connections[1].name ,
- newVersion : 1 } );
-print( "movechunk.finish: " + tojson( finishResult ) );
-assert( finishResult.ok == 1 , "finishResult failed!" );
-
-assert.eq( rdb.things.count() , 1 , "right has wrong size after move" );
-assert.eq( ldb.things.count() , 2 , "left has wrong size after move" );
-
-
-s.stop();
-
-
diff --git a/jstests/sharding/presplit.js b/jstests/sharding/presplit.js
new file mode 100644
index 0000000..6815492
--- /dev/null
+++ b/jstests/sharding/presplit.js
@@ -0,0 +1,37 @@
+// presplit.js
+
+// Starts a new sharding environment limiting the chunksize to 1MB.
+s = new ShardingTest( "presplit" , 2 , 2 , 1 , { chunksize : 1 } );
+
+// Insert enough data in 'test.foo' to fill several chunks, if it were sharded.
+bigString = "";
+while ( bigString.length < 10000 ){
+ bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
+}
+
+db = s.getDB( "test" );
+inserted = 0;
+num = 0;
+while ( inserted < ( 20 * 1024 * 1024 ) ){
+ db.foo.insert( { _id : num++ , s : bigString } );
+ inserted += bigString.length;
+}
+db.getLastError();
+
+// Make sure that there's only one chunk holding all the data.
+s.printChunks();
+primary = s.getServer( "test" ).getDB( "test" );
+assert.eq( 0 , s.config.chunks.count() , "single chunk assertion" );
+assert.eq( num , primary.foo.count() );
+
+// Turn on sharding on the 'test.foo' collection
+s.adminCommand( { enablesharding : "test" } );
+s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
+
+// Make sure the collection's original chunk got split
+s.printChunks();
+assert.lt( 20 , s.config.chunks.count() , "many chunks assertion" );
+assert.eq( num , primary.foo.count() );
+
+s.printChangeLog();
+s.stop(); \ No newline at end of file
diff --git a/jstests/sharding/remove1.js b/jstests/sharding/remove1.js
new file mode 100644
index 0000000..9593bdf
--- /dev/null
+++ b/jstests/sharding/remove1.js
@@ -0,0 +1,16 @@
+s = new ShardingTest( "remove_shard1", 2 );
+
+assert.eq( 2, s.config.shards.count() , "initial server count wrong" );
+
+// the first removeshard call puts the shard in draining mode; the second actually removes it
+assert( s.admin.runCommand( { removeshard: "shard0000" } ).ok , "failed to start draining shard" );
+assert( s.admin.runCommand( { removeshard: "shard0000" } ).ok , "failed to remove shard" );
+assert.eq( 1, s.config.shards.count() , "removed server still appears in count" );
+
+// should create a shard0002 shard
+conn = startMongodTest( 29000 );
+assert( s.admin.runCommand( { addshard: "localhost:29000" } ).ok, "failed to add shard" );
+assert.eq( 2, s.config.shards.count(), "new server does not appear in count" );
+
+stopMongod( 29000 );
+s.stop(); \ No newline at end of file
diff --git a/jstests/sharding/rename.js b/jstests/sharding/rename.js
new file mode 100644
index 0000000..aa6137d
--- /dev/null
+++ b/jstests/sharding/rename.js
@@ -0,0 +1,26 @@
+s = new ShardingTest( "rename" , 2 , 1 , 1 );
+db = s.getDB( "test" );
+
+db.foo.insert({_id:1});
+db.foo.renameCollection('bar');
+assert.isnull(db.getLastError(), '1.0');
+assert.eq(db.bar.findOne(), {_id:1}, '1.1');
+assert.eq(db.bar.count(), 1, '1.2');
+assert.eq(db.foo.count(), 0, '1.3');
+
+db.foo.insert({_id:2});
+db.foo.renameCollection('bar', true);
+assert.isnull(db.getLastError(), '2.0');
+assert.eq(db.bar.findOne(), {_id:2}, '2.1');
+assert.eq(db.bar.count(), 1, '2.2');
+assert.eq(db.foo.count(), 0, '2.3');
+
+s.adminCommand( { enablesharding : "test" } );
+
+db.foo.insert({_id:3});
+db.foo.renameCollection('bar', true);
+assert.isnull(db.getLastError(), '3.0');
+assert.eq(db.bar.findOne(), {_id:3}, '3.1');
+assert.eq(db.bar.count(), 1, '3.2');
+assert.eq(db.foo.count(), 0, '3.3');
+
diff --git a/jstests/sharding/shard1.js b/jstests/sharding/shard1.js
index bbe1144..1783238 100644
--- a/jstests/sharding/shard1.js
+++ b/jstests/sharding/shard1.js
@@ -8,7 +8,8 @@ db = s.getDB( "test" );
db.foo.insert( { num : 1 , name : "eliot" } );
db.foo.insert( { num : 2 , name : "sara" } );
db.foo.insert( { num : -1 , name : "joe" } );
-assert.eq( 3 , db.foo.find().length() );
+db.foo.ensureIndex( { num : 1 } );
+assert.eq( 3 , db.foo.find().length() , "A" );
shardCommand = { shardcollection : "test.foo" , key : { num : 1 } };
@@ -18,10 +19,15 @@ s.adminCommand( { enablesharding : "test" } );
assert.eq( 3 , db.foo.find().length() , "after partitioning count failed" );
s.adminCommand( shardCommand );
-dbconfig = s.config.databases.findOne( { name : "test" } );
-assert.eq( dbconfig.sharded["test.foo"] , { key : { num : 1 } , unique : false } , "Sharded content" );
-assert.eq( 1 , s.config.chunks.count() );
+cconfig = s.config.collections.findOne( { _id : "test.foo" } );
+delete cconfig.lastmod
+delete cconfig.dropped
+assert.eq( cconfig , { _id : "test.foo" , key : { num : 1 } , unique : false } , "Sharded content" );
+
+s.config.collections.find().forEach( printjson )
+
+assert.eq( 1 , s.config.chunks.count() , "num chunks A");
si = s.config.chunks.findOne();
assert( si );
assert.eq( si.ns , "test.foo" );
diff --git a/jstests/sharding/shard2.js b/jstests/sharding/shard2.js
index 5932210..09caf39 100644
--- a/jstests/sharding/shard2.js
+++ b/jstests/sharding/shard2.js
@@ -8,7 +8,17 @@ placeCheck = function( num ){
print("shard2 step: " + num );
}
-s = new ShardingTest( "shard2" , 2 , 6 );
+printAll = function(){
+ print( "****************" );
+ db.foo.find().forEach( printjsononeline )
+ print( "++++++++++++++++++" );
+ primary.foo.find().forEach( printjsononeline )
+ print( "++++++++++++++++++" );
+ secondary.foo.find().forEach( printjsononeline )
+ print( "---------------------" );
+}
+
+s = new ShardingTest( "shard2" , 2 , 2 );
db = s.getDB( "test" );
@@ -26,7 +36,7 @@ db.foo.save( { num : 1 , name : "eliot" } );
db.foo.save( { num : 2 , name : "sara" } );
db.foo.save( { num : -1 , name : "joe" } );
-s.adminCommand( "connpoolsync" );
+db.getLastError();
assert.eq( 3 , s.getServer( "test" ).getDB( "test" ).foo.find().length() , "not right directly to db A" );
assert.eq( 3 , db.foo.find().length() , "not right on shard" );
@@ -59,18 +69,18 @@ placeCheck( 3 );
// test inserts go to right server/shard
db.foo.save( { num : 3 , name : "bob" } );
-s.adminCommand( "connpoolsync" );
+db.getLastError();
assert.eq( 1 , primary.foo.find().length() , "after move insert go wrong place?" );
assert.eq( 3 , secondary.foo.find().length() , "after move insert go wrong place?" );
db.foo.save( { num : -2 , name : "funny man" } );
-s.adminCommand( "connpoolsync" );
+db.getLastError();
assert.eq( 2 , primary.foo.find().length() , "after move insert go wrong place?" );
assert.eq( 3 , secondary.foo.find().length() , "after move insert go wrong place?" );
db.foo.save( { num : 0 , name : "funny guy" } );
-s.adminCommand( "connpoolsync" );
+db.getLastError();
assert.eq( 2 , primary.foo.find().length() , "boundary A" );
assert.eq( 4 , secondary.foo.find().length() , "boundary B" );
@@ -129,6 +139,16 @@ assert.eq( "funny man" , db.foo.find( { num : { $lt : 100 } } ).sort( { num : 1
placeCheck( 7 );
+db.foo.find().sort( { _id : 1 } ).forEach( function(z){ print( z._id ); } )
+
+zzz = db.foo.find().explain();
+assert.eq( 6 , zzz.nscanned , "EX1a" )
+assert.eq( 6 , zzz.n , "EX1b" )
+
+zzz = db.foo.find().sort( { _id : 1 } ).explain();
+assert.eq( 6 , zzz.nscanned , "EX2a" )
+assert.eq( 6 , zzz.n , "EX2a" )
+
// getMore
assert.eq( 4 , db.foo.find().limit(-4).toArray().length , "getMore 1" );
function countCursor( c ){
@@ -178,6 +198,19 @@ placeCheck( 8 );
db.getLastError();
db.getPrevError();
+// more update stuff
+
+printAll();
+total = db.foo.find().count();
+db.foo.update( {} , { $inc : { x : 1 } } , false , true );
+x = db.getLastErrorObj();
+printAll();
+assert.eq( total , x.n , "getLastError n A: " + tojson( x ) );
+
+
+db.foo.update( { num : -1 } , { $inc : { x : 1 } } , false , true );
+assert.eq( 1 , db.getLastErrorObj().n , "getLastErrorObj n B" );
+
// ---- move all to the secondary
assert.eq( 2 , s.onNumShards( "foo" ) , "on 2 shards" );
@@ -191,4 +224,6 @@ s.adminCommand( { movechunk : "test.foo" , find : { num : -2 } , to : primary.ge
assert.eq( 2 , s.onNumShards( "foo" ) , "on 2 shards again" );
assert.eq( 3 , s.config.chunks.count() , "only 3 chunks" );
+print( "YO : " + tojson( db.runCommand( "serverStatus" ) ) );
+
s.stop();
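
The getLastErrorObj checks above depend on mongos aggregating write results: for a multi-document update, n sums the per-shard counts of documents touched. A sketch:

db.foo.update( {} , { $inc : { x : 1 } } , false , true ); // upsert=false, multi=true
var gle = db.getLastErrorObj();
print( gle.n ); // documents updated, summed across shards
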
diff --git a/jstests/sharding/shard3.js b/jstests/sharding/shard3.js
index 8c5b184..9f0cef4 100644
--- a/jstests/sharding/shard3.js
+++ b/jstests/sharding/shard3.js
@@ -1,12 +1,14 @@
// shard3.js
-s = new ShardingTest( "shard3" , 2 , 50 , 2 );
+s = new ShardingTest( "shard3" , 2 , 1 , 2 );
s2 = s._mongos[1];
s.adminCommand( { enablesharding : "test" } );
s.adminCommand( { shardcollection : "test.foo" , key : { num : 1 } } );
+s.config.databases.find().forEach( printjson )
+
a = s.getDB( "test" ).foo;
b = s2.getDB( "test" ).foo;
@@ -35,6 +37,8 @@ assert.eq( 3 , primary.find().itcount() + secondary.find().itcount() , "blah 3"
assert.eq( 3 , a.find().toArray().length , "normal B" );
assert.eq( 3 , b.find().toArray().length , "other B" );
+printjson( primary._db._adminCommand( "shardingState" ) );
+
// --- filtering ---
function doCounts( name , total ){
@@ -47,8 +51,8 @@ function doCounts( name , total ){
}
var total = doCounts( "before wrong save" )
-secondary.save( { num : -3 } );
-doCounts( "after wrong save" , total )
+//secondary.save( { num : -3 } );
+//doCounts( "after wrong save" , total )
// --- move all to 1 ---
print( "MOVE ALL TO 1" );
@@ -60,12 +64,16 @@ assert( a.findOne( { num : 1 } ) )
assert( b.findOne( { num : 1 } ) )
print( "GOING TO MOVE" );
+assert( a.findOne( { num : 1 } ) , "pre move 1" )
s.printCollectionInfo( "test.foo" );
-s.adminCommand( { movechunk : "test.foo" , find : { num : 1 } , to : s.getOther( s.getServer( "test" ) ).name } );
+myto = s.getOther( s.getServer( "test" ) ).name
+print( "counts before move: " + tojson( s.shardCounts( "foo" ) ) );
+s.adminCommand( { movechunk : "test.foo" , find : { num : 1 } , to : myto } )
+print( "counts after move: " + tojson( s.shardCounts( "foo" ) ) );
s.printCollectionInfo( "test.foo" );
assert.eq( 1 , s.onNumShards( "foo" ) , "on 1 shard again" );
-assert( a.findOne( { num : 1 } ) )
-assert( b.findOne( { num : 1 } ) )
+assert( a.findOne( { num : 1 } ) , "post move 1" )
+assert( b.findOne( { num : 1 } ) , "post move 2" )
print( "*** drop" );
@@ -127,4 +135,32 @@ s.printShardingStatus();
s.printCollectionInfo( "test.foo" , "after dropDatabase call 1" );
assert.eq( 0 , doCounts( "after dropDatabase called" ) )
+// ---- retry commands SERVER-1471 ----
+
+s.adminCommand( { enablesharding : "test2" } );
+s.adminCommand( { shardcollection : "test2.foo" , key : { num : 1 } } );
+a = s.getDB( "test2" ).foo;
+b = s2.getDB( "test2" ).foo;
+a.save( { num : 1 } );
+a.save( { num : 2 } );
+a.save( { num : 3 } );
+
+
+assert.eq( 1 , s.onNumShards( "foo" , "test2" ) , "B on 1 shards" );
+assert.eq( 3 , a.count() , "Ba" );
+assert.eq( 3 , b.count() , "Bb" );
+
+s.adminCommand( { split : "test2.foo" , middle : { num : 2 } } );
+s.adminCommand( { movechunk : "test2.foo" , find : { num : 3 } , to : s.getOther( s.getServer( "test2" ) ).name } );
+
+assert.eq( 2 , s.onNumShards( "foo" , "test2" ) , "B on 2 shards" );
+
+x = a.stats()
+printjson( x )
+y = b.stats()
+printjson( y )
+
+
+
+
s.stop();
diff --git a/jstests/sharding/shard6.js b/jstests/sharding/shard6.js
index e15d74c..70c5ed7 100644
--- a/jstests/sharding/shard6.js
+++ b/jstests/sharding/shard6.js
@@ -1,12 +1,30 @@
// shard6.js
-s = new ShardingTest( "shard6" , 2 , 0 , 1 );
+summary = "";
+
+s = new ShardingTest( "shard6" , 2 , 0 , 2 );
s.adminCommand( { enablesharding : "test" } );
s.adminCommand( { shardcollection : "test.data" , key : { num : 1 } } );
db = s.getDB( "test" );
+function poolStats( where ){
+ var total = 0;
+ var msg = "poolStats " + where + " ";
+ var x = db.runCommand( "connPoolStats" ).hosts
+ for ( var h in x ){
+ var z = x[h];
+ msg += z.created + " ";
+ total += z.created
+ }
+ print( "****\n" + msg + "\n*****" )
+ summary += msg + "\n";
+ return total
+}
+
+poolStats( "at start" )
+
// we want a lot of data, so let's make a 50k string to cheat :)
bigString = "";
while ( bigString.length < 50000 )
@@ -18,22 +36,71 @@ for ( ; num<100; num++ ){
db.data.save( { num : num , bigString : bigString } );
}
-assert.eq( 100 , db.data.find().toArray().length );
+assert.eq( 100 , db.data.find().toArray().length , "basic find after setup" );
+
+connBefore = poolStats( "setup done" )
// limit
assert.eq( 77 , db.data.find().limit(77).itcount() , "limit test 1" );
assert.eq( 1 , db.data.find().limit(1).itcount() , "limit test 2" );
for ( var i=1; i<10; i++ ){
- assert.eq( i , db.data.find().limit(i).itcount() , "limit test 3 : " + i );
+ assert.eq( i , db.data.find().limit(i).itcount() , "limit test 3a : " + i );
+ assert.eq( i , db.data.find().skip(i).limit(i).itcount() , "limit test 3b : " + i );
+ poolStats( "after loop : " + i );
}
+assert.eq( connBefore , poolStats( "limit test done" ) , "limit test conns" );
+
+function assertOrder( start , num ){
+ var a = db.data.find().skip(start).limit(num).sort( { num : 1 } ).map( function(z){ return z.num; } );
+ var c = []
+ for ( var i=0; i<num; i++ )
+ c.push( start + i );
+ assert.eq( c , a , "assertOrder start: " + start + " num: " + num );
+}
+
+assertOrder( 0 , 10 );
+assertOrder( 5 , 10 );
+
+poolStats( "after checking order" )
+
+function doItCount( skip , sort , batchSize ){
+ var c = db.data.find();
+ if ( skip )
+ c.skip( skip )
+ if ( sort )
+ c.sort( sort );
+ if ( batchSize )
+ c.batchSize( batchSize )
+ return c.itcount();
+
+}
+
+function checkItCount( batchSize ){
+ assert.eq( 5 , doItCount( num - 5 , null , batchSize ) , "skip 1 " + batchSize );
+ assert.eq( 5 , doItCount( num - 5 , { num : 1 } , batchSize ) , "skip 2 " + batchSize );
+ assert.eq( 5 , doItCount( num - 5 , { _id : 1 } , batchSize ) , "skip 3 " + batchSize );
+ assert.eq( 0 , doItCount( num + 5 , { num : 1 } , batchSize ) , "skip 4 " + batchSize );
+ assert.eq( 0 , doItCount( num + 5 , { _id : 1 } , batchSize ) , "skip 5 " + batchSize );
+}
+
+poolStats( "before checking itcount" )
+
+checkItCount( 0 )
+checkItCount( 2 )
+
+poolStats( "after checking itcount" )
// --- test save support ---
o = db.data.findOne();
o.x = 16;
db.data.save( o );
-assert.eq( 16 , db.data.findOne( { _id : o._id } ).x , "x1 - did save fail?" );
+o = db.data.findOne( { _id : o._id } )
+assert.eq( 16 , o.x , "x1 - did save fail? " + tojson(o) );
+
+poolStats( "at end" )
+print( summary )
s.stop();
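
poolStats above distills connPoolStats down to connection-creation counts; the limit tests assert the total stays flat, i.e. cursor iteration is not leaking connections. The underlying command, in isolation:

var hosts = db.runCommand( "connPoolStats" ).hosts;
for ( var h in hosts )
    print( h + " created: " + hosts[h].created );
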
diff --git a/jstests/sharding/sort1.js b/jstests/sharding/sort1.js
new file mode 100644
index 0000000..0edb7a7
--- /dev/null
+++ b/jstests/sharding/sort1.js
@@ -0,0 +1,81 @@
+
+s = new ShardingTest( "sort1" , 2 , 0 , 2 )
+
+s.adminCommand( { enablesharding : "test" } );
+s.adminCommand( { shardcollection : "test.data" , key : { num : 1 } } );
+
+db = s.getDB( "test" );
+
+N = 100
+
+forward = []
+backward = []
+for ( i=0; i<N; i++ ){
+ db.data.insert( { _id : i , num : i , x : N - i } )
+ forward.push( i )
+ backward.push( ( N - 1 ) - i )
+}
+db.getLastError();
+
+s.adminCommand( { split : "test.data" , middle : { num : 33 } } )
+s.adminCommand( { split : "test.data" , middle : { num : 66 } } )
+
+s.adminCommand( { movechunk : "test.data" , find : { num : 50 } , to : s.getOther( s.getServer( "test" ) ).name } );
+
+assert.eq( 3 , s.config.chunks.find().itcount() , "A1" );
+
+temp = s.config.chunks.find().sort( { min : 1 } ).toArray();
+assert.eq( temp[0].shard , temp[2].shard , "A2" );
+assert.neq( temp[0].shard , temp[1].shard , "A3" );
+
+temp = db.data.find().sort( { num : 1 } ).toArray();
+assert.eq( N , temp.length , "B1" );
+for ( i=0; i<100; i++ ){
+ assert.eq( i , temp[i].num , "B2" )
+}
+
+
+db.data.find().sort( { num : 1 } ).toArray();
+s.getServer("test").getDB( "test" ).data.find().sort( { num : 1 } ).toArray();
+
+a = Date.timeFunc( function(){ z = db.data.find().sort( { num : 1 } ).toArray(); } , 200 );
+assert.eq( 100 , z.length , "C1" )
+b = 1.5 * Date.timeFunc( function(){ z = s.getServer("test").getDB( "test" ).data.find().sort( { num : 1 } ).toArray(); } , 200 );
+assert.eq( 67 , z.length , "C2" )
+
+print( "a: " + a + " b:" + b + " mongos slow down: " + Math.ceil( 100 * ( ( a - b ) / b ) ) + "%" )
+
+// -- secondary index sorting
+
+function getSorted( by , want , dir , proj ){
+ var s = {}
+ s[by] = dir || 1;
+ printjson( s )
+ var cur = db.data.find( {} , proj || {} ).sort( s )
+ return terse( cur.map( function(z){ return z[want]; } ) );
+}
+
+function terse( a ){
+ var s = "";
+ for ( var i=0; i<a.length; i++ ){
+ if ( i > 0 )
+ s += ",";
+ s += a[i];
+ }
+ return s;
+}
+
+forward = terse(forward);
+backward = terse(backward);
+
+assert.eq( forward , getSorted( "num" , "num" , 1 ) , "D1" )
+assert.eq( backward , getSorted( "num" , "num" , -1 ) , "D2" )
+
+assert.eq( backward , getSorted( "x" , "num" , 1 ) , "D3" )
+assert.eq( forward , getSorted( "x" , "num" , -1 ) , "D4" )
+
+assert.eq( backward , getSorted( "x" , "num" , 1 , { num : 1 } ) , "D5" )
+assert.eq( forward , getSorted( "x" , "num" , -1 , { num : 1 } ) , "D6" )
+
+
+s.stop();
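
The D3-D6 assertions work because mongos merge-sorts the per-shard streams: a sort on x, which is not the shard key, still comes back globally ordered, and D5/D6 show this holds even when the projection excludes the sort field. A sketch:

// sorting on a non-shard-key field returns a single globally ordered stream,
// even when the sort field is projected out
db.data.find( {} , { num : 1 } ).sort( { x : 1 } ).forEach( printjsononeline );
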
diff --git a/jstests/sharding/splitpick.js b/jstests/sharding/splitpick.js
index ad27645..3733906 100644
--- a/jstests/sharding/splitpick.js
+++ b/jstests/sharding/splitpick.js
@@ -17,17 +17,23 @@ for ( var i=1; i<20; i++ ){
c.save( { a : i } );
}
c.save( { a : 99 } );
+db.getLastError();
-assert.eq( s.admin.runCommand( { splitvalue : "test.foo" , find : { a : 1 } } ).middle.a , 1 , "splitvalue 1" );
-assert.eq( s.admin.runCommand( { splitvalue : "test.foo" , find : { a : 3 } } ).middle.a , 1 , "splitvalue 2" );
+function checkSplit( f, want , num ){
+ x = s.admin.runCommand( { splitvalue : "test.foo" , find : { a : f } } );
+ assert.eq( want, x.middle ? x.middle.a : null , "splitvalue " + num + " " + tojson( x ) );
+}
+
+checkSplit( 1 , 1 , "1" )
+checkSplit( 3 , 1 , "2" )
s.adminCommand( { split : "test.foo" , find : { a : 1 } } );
-assert.eq( s.admin.runCommand( { splitvalue : "test.foo" , find : { a : 3 } } ).middle.a , 99 , "splitvalue 3" );
+checkSplit( 3 , 99 , "3" )
s.adminCommand( { split : "test.foo" , find : { a : 99 } } );
assert.eq( s.config.chunks.count() , 3 );
s.printChunks();
-assert.eq( s.admin.runCommand( { splitvalue : "test.foo" , find : { a : 50 } } ).middle.a , 10 , "splitvalue 4 " );
+checkSplit( 50 , 10 , "4" )
s.stop();
diff --git a/jstests/sharding/stats.js b/jstests/sharding/stats.js
new file mode 100644
index 0000000..c75d208
--- /dev/null
+++ b/jstests/sharding/stats.js
@@ -0,0 +1,60 @@
+s = new ShardingTest( "stats" , 2 , 1 , 1 );
+s.adminCommand( { enablesharding : "test" } );
+
+a = s._connections[0].getDB( "test" );
+b = s._connections[1].getDB( "test" );
+
+db = s.getDB( "test" );
+
+function numKeys(o){
+ var num = 0;
+ for (var x in o)
+ num++;
+ return num;
+}
+
+// ---------- load some data -----
+
+// collections sharded before and after the main collection are needed for a proper test
+s.adminCommand( { shardcollection : "test.aaa" , key : { _id : 1 } } );
+s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } ); // this collection is actually used
+s.adminCommand( { shardcollection : "test.zzz" , key : { _id : 1 } } );
+
+
+N = 10000;
+s.adminCommand( { split : "test.foo" , middle : { _id : N/2 } } )
+s.adminCommand( { moveChunk : "test.foo", find : { _id : 3 } ,to : s.getNonPrimaries( "test" )[0] } )
+
+for ( i=0; i<N; i++ )
+ db.foo.insert( { _id : i } )
+db.getLastError();
+
+x = db.foo.stats();
+assert.eq( N , x.count , "coll total count expected" )
+assert.eq( db.foo.count() , x.count , "coll total count match" )
+assert.eq( 2 , x.nchunks , "coll chunk num" )
+assert.eq( 2 , numKeys(x.shards) , "coll shard num" )
+assert.eq( N / 2 , x.shards.shard0000.count , "coll count on shard0000 expected" )
+assert.eq( N / 2 , x.shards.shard0001.count , "coll count on shard0001 expected" )
+assert.eq( a.foo.count() , x.shards.shard0000.count , "coll count on shard0000 match" )
+assert.eq( b.foo.count() , x.shards.shard0001.count , "coll count on shard0001 match" )
+
+
+a_extras = a.stats().objects - a.foo.count(); // things like system.namespaces and system.indexes
+b_extras = b.stats().objects - b.foo.count(); // things like system.namespaces and system.indexes
+print("a_extras: " + a_extras);
+print("b_extras: " + b_extras);
+
+x = db.stats();
+
+// dbstats uses Future::CommandResult, so raw output uses connection strings, not shard names
+shards = Object.keySet(x.raw);
+
+assert.eq( N + (a_extras + b_extras) , x.objects , "db total count expected" )
+assert.eq( 2 , numKeys(x.raw) , "db shard num" )
+assert.eq( (N / 2) + a_extras, x.raw[shards[0]].objects , "db count on shard0000 expected" )
+assert.eq( (N / 2) + b_extras, x.raw[shards[1]].objects , "db count on shard0001 expected" )
+assert.eq( a.stats().objects , x.raw[shards[0]].objects , "db count on shard0000 match" )
+assert.eq( b.stats().objects , x.raw[shards[1]].objects , "db count on shard0001 match" )
+
+s.stop()
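
As the comment above notes, raw dbstats output is keyed by connection string rather than shard name; a sketch of walking it with the shell's Object.keySet helper:

var x = db.stats();
Object.keySet( x.raw ).forEach( function( h ){
    print( h + ": " + x.raw[h].objects + " objects" );
} );
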
diff --git a/jstests/sharding/sync1.js b/jstests/sharding/sync1.js
index 905b488..e649387 100644
--- a/jstests/sharding/sync1.js
+++ b/jstests/sharding/sync1.js
@@ -18,4 +18,9 @@ assert.eq( 2 , t.find().itcount() , "B2" );
test.tempStart();
test.checkHashes( "test" , "B3" );
+
+assert.eq( 2 , t.find().itcount() , "C1" );
+t.remove( { x : 1 } )
+assert.eq( 1 , t.find().itcount() , "C2" );
+
test.stop();
diff --git a/jstests/sharding/sync2.js b/jstests/sharding/sync2.js
index b0bbcb6..c249d11 100644
--- a/jstests/sharding/sync2.js
+++ b/jstests/sharding/sync2.js
@@ -7,13 +7,13 @@ s2 = s._mongos[1];
s.adminCommand( { enablesharding : "test" } );
s.adminCommand( { shardcollection : "test.foo" , key : { num : 1 } } );
-s.getDB( "test" ).foo.save( { num : 1 } );
-s.getDB( "test" ).foo.save( { num : 2 } );
-s.getDB( "test" ).foo.save( { num : 3 } );
-s.getDB( "test" ).foo.save( { num : 4 } );
-s.getDB( "test" ).foo.save( { num : 5 } );
-s.getDB( "test" ).foo.save( { num : 6 } );
-s.getDB( "test" ).foo.save( { num : 7 } );
+s.getDB( "test" ).foo.insert( { num : 1 } );
+s.getDB( "test" ).foo.insert( { num : 2 } );
+s.getDB( "test" ).foo.insert( { num : 3 } );
+s.getDB( "test" ).foo.insert( { num : 4 } );
+s.getDB( "test" ).foo.insert( { num : 5 } );
+s.getDB( "test" ).foo.insert( { num : 6 } );
+s.getDB( "test" ).foo.insert( { num : 7 } );
assert.eq( 7 , s.getDB( "test" ).foo.find().toArray().length , "normal A" );
assert.eq( 7 , s2.getDB( "test" ).foo.find().toArray().length , "other A" );
@@ -21,10 +21,10 @@ assert.eq( 7 , s2.getDB( "test" ).foo.find().toArray().length , "other A" );
s.adminCommand( { split : "test.foo" , middle : { num : 4 } } );
s.adminCommand( { movechunk : "test.foo" , find : { num : 3 } , to : s.getFirstOther( s.getServer( "test" ) ).name } );
-assert( s._connections[0].getDB( "test" ).foo.find().toArray().length > 0 , "blah 1" );
-assert( s._connections[1].getDB( "test" ).foo.find().toArray().length > 0 , "blah 2" );
+assert( s._connections[0].getDB( "test" ).foo.find().toArray().length > 0 , "shard 0 request" );
+assert( s._connections[1].getDB( "test" ).foo.find().toArray().length > 0 , "shard 1 request" );
assert.eq( 7 , s._connections[0].getDB( "test" ).foo.find().toArray().length +
- s._connections[1].getDB( "test" ).foo.find().toArray().length , "blah 3" );
+ s._connections[1].getDB( "test" ).foo.find().toArray().length , "combined shards" );
assert.eq( 7 , s.getDB( "test" ).foo.find().toArray().length , "normal B" );
assert.eq( 7 , s2.getDB( "test" ).foo.find().toArray().length , "other B" );
@@ -45,4 +45,54 @@ for ( var i=0; i<10; i++ ){
assert.eq( 7 , s2.getDB( "test" ).foo.find().toArray().length , "other B " + i );
}
+assert.eq( 0 , s.config.big.find().itcount() , "C1" );
+for ( i=0; i<50; i++ ){
+ s.config.big.insert( { _id : i } );
+}
+s.config.getLastError();
+assert.eq( 50 , s.config.big.find().itcount() , "C2" );
+assert.eq( 50 , s.config.big.find().count() , "C3" );
+assert.eq( 50 , s.config.big.find().batchSize(5).itcount() , "C4" );
+
+
+hashes = []
+
+for ( i=0; i<3; i++ ){
+ print( i );
+ s._connections[i].getDB( "config" ).chunks.find( {} , { lastmod : 1 } ).forEach( printjsononeline );
+ hashes[i] = s._connections[i].getDB( "config" ).runCommand( "dbhash" );
+}
+
+printjson( hashes );
+
+for ( i=1; i<hashes.length; i++ ){
+ if ( hashes[0].md5 == hashes[i].md5 )
+ continue;
+
+ assert.eq( hashes[0].numCollections , hashes[i].numCollections , "num collections" );
+
+ var bad = false;
+
+ for ( var k in hashes[0].collections ){
+ if ( hashes[0].collections[k] ==
+ hashes[i].collections[k] )
+ continue;
+
+ if ( k == "mongos" || k == "changelog" || k == "locks" )
+ continue;
+
+ bad = true;
+ print( "collection " + k + " is different" );
+
+ print( "----" );
+ s._connections[0].getDB( "config" ).getCollection( k ).find().sort( { _id : 1 } ).forEach( printjsononeline );
+ print( "----" );
+ s._connections[i].getDB( "config" ).getCollection( k ).find().sort( { _id : 1 } ).forEach( printjsononeline );
+ print( "----" );
+ }
+
+ if ( bad )
+ throw "hashes different";
+}
+
s.stop();
diff --git a/jstests/sharding/sync3.js b/jstests/sharding/sync3.js
new file mode 100644
index 0000000..3737419
--- /dev/null
+++ b/jstests/sharding/sync3.js
@@ -0,0 +1,10 @@
+
+test = new SyncCCTest( "sync3" , { logpath : "/dev/null" } )
+
+x = test._connections[0].getDB( "admin" ).runCommand( { "_testDistLockWithSyncCluster" : 1 , host : test.url } )
+printjson( x )
+assert( x.ok );
+
+
+
+test.stop();
diff --git a/jstests/sharding/sync4.js b/jstests/sharding/sync4.js
new file mode 100644
index 0000000..6733f07
--- /dev/null
+++ b/jstests/sharding/sync4.js
@@ -0,0 +1,19 @@
+
+test = new SyncCCTest( "sync4" )
+
+db = test.conn.getDB( "test" )
+t = db.sync4
+
+for ( i=0; i<1000; i++ ){
+ t.insert( { _id : i , x : "asdasdsdasdas" } )
+}
+db.getLastError();
+
+test.checkHashes( "test" , "A0" );
+assert.eq( 1000 , t.find().count() , "A1" )
+assert.eq( 1000 , t.find().itcount() , "A2" )
+assert.eq( 1000 , t.find().snapshot().batchSize(10).itcount() , "A2" )
+
+
+
+test.stop();
diff --git a/jstests/sharding/update1.js b/jstests/sharding/update1.js
index 82c3d8a..63d4bf6 100644
--- a/jstests/sharding/update1.js
+++ b/jstests/sharding/update1.js
@@ -8,20 +8,24 @@ coll = db.update1;
coll.insert({_id:1, key:1});
-// these are upserts
+// these are both upserts
coll.save({_id:2, key:2});
-coll.save({_id:3, key:3});
+coll.update({_id:3, key:3}, {$set: {foo: 'bar'}}, {upsert: true});
assert.eq(coll.count(), 3, "count A")
+assert.eq(coll.findOne({_id:3}).key, 3 , "findOne 3 key A")
+assert.eq(coll.findOne({_id:3}).foo, 'bar' , "findOne 3 foo A")
// update existing using save()
coll.save({_id:1, key:1, other:1});
// update existing using update()
coll.update({_id:2}, {key:2, other:2});
-//coll.update({_id:3, key:3}, {other:3}); //should add key to new object (doesn't work yet)
coll.update({_id:3}, {key:3, other:3});
+coll.update({_id:3, key:3}, {other:4});
+assert.eq(db.getLastErrorObj().code, 12376, 'bad update error');
+
assert.eq(coll.count(), 3, "count B")
coll.find().forEach(function(x){
assert.eq(x._id, x.key, "_id == key");
@@ -29,5 +33,14 @@ coll.find().forEach(function(x){
});
+coll.update({_id:1, key:1}, {$set: {key:2}});
+err = db.getLastErrorObj();
+assert.eq(coll.findOne({_id:1}).key, 1, 'key unchanged');
+assert.eq(err.code, 13123, 'key error code 1');
+assert.eq(err.code, 13123, 'key error code 2');
+
+coll.update({_id:1, key:1}, {$set: {foo:2}});
+assert.isnull(db.getLastError(), 'getLastError reset');
+
s.stop()
diff --git a/jstests/shellkillop.js b/jstests/shellkillop.js
index e8a9763..1091458 100644
--- a/jstests/shellkillop.js
+++ b/jstests/shellkillop.js
@@ -7,7 +7,7 @@ for( i = 0; i < 100000; ++i ) {
}
assert.eq( 100000, db[ baseName ].count() );
-spawn = startMongoProgramNoConnect( "mongo", "--port", myPort(), "--eval", "db." + baseName + ".update( {}, {$set:{i:\"abcdefghijkl\"}}, false, true ); db." + baseName + ".count();" );
+spawn = startMongoProgramNoConnect( "mongo", "--autokillop", "--port", myPort(), "--eval", "db." + baseName + ".update( {}, {$set:{i:\"abcdefghijkl\"}}, false, true ); db." + baseName + ".count();" );
sleep( 100 );
stopMongoProgramByPid( spawn );
sleep( 100 );
diff --git a/jstests/shellspawn.js b/jstests/shellspawn.js
index 5b0de6b..7df3c04 100644
--- a/jstests/shellspawn.js
+++ b/jstests/shellspawn.js
@@ -14,11 +14,15 @@ else {
spawn = startMongoProgramNoConnect( "mongo", "--port", myPort(), "--eval", "print( 'I am a shell' );" );
+ stopMongoProgramByPid( spawn );
+
spawn = startMongoProgramNoConnect( "mongo", "--port", myPort() );
+
+ stopMongoProgramByPid( spawn );
spawn = startMongoProgramNoConnect( "mongo", "--port", myPort() );
stopMongoProgramByPid( spawn );
-
+
// all these shells should be killed
}
diff --git a/jstests/slice1.js b/jstests/slice1.js
new file mode 100644
index 0000000..b20e7e4
--- /dev/null
+++ b/jstests/slice1.js
@@ -0,0 +1,68 @@
+t = db.slice1;
+t.drop();
+
+t.insert({_id:1, a:[0,1,2,3,4,5,-5,-4,-3,-2,-1], b:1, c:1});
+
+// first three
+out = t.findOne({}, {a:{$slice:3}});
+assert.eq(out.a , [0,1,2], '1');
+
+// last three
+out = t.findOne({}, {a:{$slice:-3}});
+assert.eq(out.a , [-3, -2, -1], '2');
+
+// skip 2, limit 3
+out = t.findOne({}, {a:{$slice:[2, 3]}});
+assert.eq(out.a , [2,3,4], '3');
+
+// skip to fifth from last, limit 4
+out = t.findOne({}, {a:{$slice:[-5, 4]}});
+assert.eq(out.a , [-5, -4, -3, -2], '4');
+
+// skip to fifth from last, limit 10
+out = t.findOne({}, {a:{$slice:[-5, 10]}});
+assert.eq(out.a , [-5, -4, -3, -2, -1], '5');
+
+
+// interaction with other fields
+
+out = t.findOne({}, {a:{$slice:3}});
+assert.eq(out.a , [0,1,2], 'A 1');
+assert.eq(out.b , 1, 'A 2');
+assert.eq(out.c , 1, 'A 3');
+
+out = t.findOne({}, {a:{$slice:3}, b:true});
+assert.eq(out.a , [0,1,2], 'B 1');
+assert.eq(out.b , 1, 'B 2');
+assert.eq(out.c , undefined);
+
+out = t.findOne({}, {a:{$slice:3}, b:false});
+assert.eq(out.a , [0,1,2]);
+assert.eq(out.b , undefined);
+assert.eq(out.c , 1);
+
+t.drop()
+t.insert({comments: [{id:0, text:'a'},{id:1, text:'b'},{id:2, text:'c'},{id:3, text:'d'}], title:'foo'})
+
+
+out = t.findOne({}, {comments:{$slice:2}, 'comments.id':true});
+assert.eq(out.comments , [{id:0}, {id:1}]);
+assert.eq(out.title , undefined);
+
+out = t.findOne({}, {comments:{$slice:2}, 'comments.id':false});
+assert.eq(out.comments , [{text: 'a'}, {text: 'b'}]);
+assert.eq(out.title , 'foo');
+
+// nested arrays
+t.drop();
+t.insert({_id:1, a:[[1,1,1], [2,2,2], [3,3,3]], b:1, c:1});
+
+out = t.findOne({}, {a:{$slice:1}});
+assert.eq(out.a , [[1,1,1]], 'n 1');
+
+out = t.findOne({}, {a:{$slice:-1}});
+assert.eq(out.a , [[3,3,3]], 'n 2');
+
+out = t.findOne({}, {a:{$slice:[0,2]}});
+assert.eq(out.a , [[1,1,1],[2,2,2]], 'n 2');
+
diff --git a/jstests/slowNightly/remove9.js b/jstests/slowNightly/remove9.js
new file mode 100644
index 0000000..e7dfe9b
--- /dev/null
+++ b/jstests/slowNightly/remove9.js
@@ -0,0 +1,14 @@
+t = db.jstests_remove9;
+t.drop();
+
+js = "while( 1 ) { for( i = 0; i < 10000; ++i ) { db.jstests_remove9.save( {i:i} ); } db.jstests_remove9.remove( {i: {$gte:0} } ); }";
+pid = startMongoProgramNoConnect( "mongo" , "--eval" , js , db ? db.getMongo().host : null );
+
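+// While the background shell endlessly refills and clears the collection,
+// these racing removes of random documents should never produce an error.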
+for( var i = 0; i < 10000; ++i ) {
+ t.remove( {i:Random.randInt( 10000 )} );
+ assert.automsg( "!db.getLastError()" );
+}
+
+stopMongoProgramByPid( pid ); \ No newline at end of file
diff --git a/jstests/slowNightly/run_sharding_passthrough.js b/jstests/slowNightly/run_sharding_passthrough.js
new file mode 100644
index 0000000..fda982b
--- /dev/null
+++ b/jstests/slowNightly/run_sharding_passthrough.js
@@ -0,0 +1,94 @@
+s = new ShardingTest( "auto1" , 2 , 1 , 1 );
+s.adminCommand( { enablesharding : "test" } );
+db=s.getDB("test");
+
+var files = listFiles("jstests");
+
+var runnerStart = new Date()
+
+files.forEach(
+ function(x) {
+
+// /(basic|update).*\.js$/
+ if ( /[\/\\]_/.test(x.name) ||
+ ! /\.js$/.test(x.name ) ){
+ print(" >>>>>>>>>>>>>>> skipping " + x.name);
+ return;
+ }
+
+ // Notes:
+
+ // apply_ops1: nothing works, dunno why yet. SERVER-1439
+
+ // copydb, copydb2: copyDatabase seems not to work at all in
+ // the ShardingTest setup. SERVER-1440
+
+ // cursor8: cursorInfo different/meaningless(?) in mongos
+ // closeAllDatabases may not work through mongos
+ // SERVER-1441
+ // deal with cursorInfo in mongos SERVER-1442
+
+ // dbcase: Database names are case-insensitive under ShardingTest?
+ // SERVER-1443
+
+ // These are all SERVER-1444
+ // count5: limit() and maybe skip() may be unreliable
+ // geo3: limit() not working, I think
+ // or4: skip() not working?
+
+ // shellkillop: dunno yet. SERVER-1445
+
+ // These should simply not be run under sharding:
+    // dbadmin: Uncertain. Cut-n-pasting its contents into mongo worked.
+ // error1: getpreverror not supported under sharding
+ // fsync, fsync2: isn't supported through mongos
+    // remove5: getpreverror, I think. Don't run.
+    // update4: getpreverror. Don't run.
+
+ // Around July 20, command passthrough went away, and these
+ // commands weren't implemented:
+ // clean cloneCollectionAsCapped copydbgetnonce dataSize
+ // datasize dbstats deleteIndexes dropIndexes forceerror
+ // getnonce logout medianKey profile reIndex repairDatabase
+ // reseterror splitVector validate
+
+ /* missing commands :
+ * forceerror and switchtoclienterrors
+ * cloneCollectionAsCapped
+ * splitvector
+ * profile (apitest_db, cursor6, evalb)
+ * copydbgetnonce
+ * dbhash
+ * medianKey
+ * clean (apitest_dbcollection)
+ * logout and getnonce
+ */
+ if (/[\/\\](error3|capped.*|splitvector|apitest_db|cursor6|copydb-auth|profile1|dbhash|median|apitest_dbcollection|evalb|auth1|auth2)\.js$/.test(x.name)) {
+ print(" !!!!!!!!!!!!!!! skipping test that has failed under sharding but might not anymore " + x.name)
+ return;
+ }
+ // These are bugs (some might be fixed now):
+ if (/[\/\\](apply_ops1|count5|cursor8|or4|shellkillop|update4)\.js$/.test(x.name)) {
+ print(" !!!!!!!!!!!!!!! skipping test that has failed under sharding but might not anymore " + x.name)
+ return;
+ }
+ // These aren't supposed to get run under sharding:
+ if (/[\/\\](dbadmin|error1|fsync|fsync2|geo.*|indexh|remove5|update4)\.js$/.test(x.name)) {
+ print(" >>>>>>>>>>>>>>> skipping test that would fail under sharding " + x.name)
+ return;
+ }
+
+ print(" *******************************************");
+ print(" Test : " + x.name + " ...");
+ print(" " + Date.timeFunc(
+ function() {
+ load(x.name);
+ }, 1) + "ms");
+
+ }
+);
+
+
+var runnerEnd = new Date()
+
+print( "total runner time: " + ( ( runnerEnd.getTime() - runnerStart.getTime() ) / 1000 ) + "secs" )
diff --git a/jstests/slowNightly/sharding_balance1.js b/jstests/slowNightly/sharding_balance1.js
new file mode 100644
index 0000000..840aaff
--- /dev/null
+++ b/jstests/slowNightly/sharding_balance1.js
@@ -0,0 +1,55 @@
+// sharding_balance1.js
+
+
+s = new ShardingTest( "slow_sharding_balance1" , 2 , 2 , 1 , { chunksize : 1 } )
+
+s.adminCommand( { enablesharding : "test" } );
+
+s.config.settings.find().forEach( printjson )
+
+db = s.getDB( "test" );
+
+bigString = ""
+while ( bigString.length < 10000 )
+ bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
+
+inserted = 0;
+num = 0;
+while ( inserted < ( 20 * 1024 * 1024 ) ){
+ db.foo.insert( { _id : num++ , s : bigString } );
+ inserted += bigString.length;
+}
+
+db.getLastError();
+s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
+assert.lt( 20 , s.config.chunks.count() , "setup2" );
+
+function diff(){
+ var x = s.chunkCounts( "foo" );
+ printjson( x )
+ return Math.max( x.shard0000 , x.shard0001 ) - Math.min( x.shard0000 , x.shard0001 );
+}
+
+function sum(){
+ var x = s.chunkCounts( "foo" );
+ return x.shard0000 + x.shard0001;
+}
+
+assert.lt( 20 , diff() , "big differential here" );
+print( diff() )
+
+assert.soon( function(){
+ var d = diff();
+ return d < 5;
+} , "balance didn't happen" , 1000 * 60 * 3 , 5000 );
+
+var chunkCount = sum();
+s.adminCommand( { removeshard: "shard0000" } );
+
+assert.soon( function(){
+ printjson(s.chunkCounts( "foo" ));
+ s.config.shards.find().forEach(function(z){printjson(z);});
+ return chunkCount == s.config.chunks.count({shard: "shard0001"});
+} , "removeshard didn't happen" , 1000 * 60 * 3 , 5000 );
+
+s.stop();
diff --git a/jstests/slowNightly/sharding_balance2.js b/jstests/slowNightly/sharding_balance2.js
new file mode 100644
index 0000000..c94e256
--- /dev/null
+++ b/jstests/slowNightly/sharding_balance2.js
@@ -0,0 +1,54 @@
+// sharding_balance2.js
+
+s = new ShardingTest( "slow_sharding_balance2" , 2 , 2 , 1 , { chunksize : 1 , manualAddShard : true } )
+
+names = s.getConnNames();
+for ( var i=0; i<names.length; i++ ){
+ if ( i==1 ) {
+        // We set maxSize of the shard to something artificially low. That mongod will still
+        // allocate and mmap storage as usual, but the balancing mongos will not ship any
+        // chunks to it.
+ s.adminCommand( { addshard : names[i] , maxSize : 1 } );
+ } else {
+ s.adminCommand( { addshard : names[i] } );
+ }
+}
+
+s.adminCommand( { enablesharding : "test" } );
+
+s.config.settings.find().forEach( printjson )
+
+db = s.getDB( "test" );
+
+bigString = ""
+while ( bigString.length < 10000 )
+ bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
+
+inserted = 0;
+num = 0;
+while ( inserted < ( 40 * 1024 * 1024 ) ){
+ db.foo.insert( { _id : num++ , s : bigString } );
+ inserted += bigString.length;
+}
+
+db.getLastError();
+s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
+assert.lt( 20 , s.config.chunks.count() , "setup2" );
+
+function diff(){
+ var x = s.chunkCounts( "foo" );
+ printjson( x )
+ return Math.max( x.shard0000 , x.shard0001 ) - Math.min( x.shard0000 , x.shard0001 );
+}
+
+assert.lt( 10 , diff() );
+print( diff() )
+
+var currDiff = diff();
+assert.repeat( function(){
+ var d = diff();
+ return d != currDiff;
+} , "balance with maxSize should not have happened" , 1000 * 30 , 5000 );
+
+
+s.stop();
diff --git a/jstests/slowNightly/sharding_balance3.js b/jstests/slowNightly/sharding_balance3.js
new file mode 100644
index 0000000..faec197
--- /dev/null
+++ b/jstests/slowNightly/sharding_balance3.js
@@ -0,0 +1,57 @@
+// sharding_balance3.js
+
+s = new ShardingTest( "slow_sharding_balance3" , 2 , 2 , 1 , { chunksize : 1 } );
+
+s.adminCommand( { enablesharding : "test" } );
+
+s.config.settings.find().forEach( printjson );
+
+db = s.getDB( "test" );
+
+bigString = ""
+while ( bigString.length < 10000 )
+ bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
+
+inserted = 0;
+num = 0;
+while ( inserted < ( 40 * 1024 * 1024 ) ){
+ db.foo.insert( { _id : num++ , s : bigString } );
+ inserted += bigString.length;
+}
+
+db.getLastError();
+s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
+assert.lt( 20 , s.config.chunks.count() , "setup2" );
+
+function diff(){
+ var x = s.chunkCounts( "foo" );
+ printjson( x )
+ return Math.max( x.shard0000 , x.shard0001 ) - Math.min( x.shard0000 , x.shard0001 );
+}
+
+assert.lt( 10 , diff() );
+
+// Wait for balancer to kick in.
+var initialDiff = diff();
+var maxRetries = 3;
+while ( diff() == initialDiff ){
+ sleep( 5000 );
+ assert.lt( 0, maxRetries--, "Balancer did not kick in.");
+}
+
+print("* A");
+print( "disabling the balancer" );
+s.config.settings.update( { _id : "balancer" }, { $set : { stopped : true } } , true );
+s.config.settings.find().forEach( printjson );
+print("* B");
+
+
+print( diff() )
+
+var currDiff = diff();
+assert.repeat( function(){
+ var d = diff();
+ return d != currDiff;
+} , "balance with stopped flag should not have happened" , 1000 * 60 , 5000 );
+
+s.stop()
diff --git a/jstests/slowNightly/sharding_balance4.js b/jstests/slowNightly/sharding_balance4.js
new file mode 100644
index 0000000..c54d3da
--- /dev/null
+++ b/jstests/slowNightly/sharding_balance4.js
@@ -0,0 +1,124 @@
+// sharding_balance4.js
+
+s = new ShardingTest( "slow_sharding_balance4" , 2 , 2 , 1 , { chunksize : 1 } )
+
+s.adminCommand( { enablesharding : "test" } );
+s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
+assert.eq( 1 , s.config.chunks.count() , "setup1" );
+
+s.config.settings.find().forEach( printjson )
+
+db = s.getDB( "test" );
+
+bigString = ""
+while ( bigString.length < 10000 )
+ bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
+
+N = 3000
+
+num = 0;
+
+counts = {}
+
+function doUpdate( includeString ){
+ var up = { $inc : { x : 1 } }
+ if ( includeString )
+ up["$set"] = { s : bigString };
+ var myid = Random.randInt( N )
+ db.foo.update( { _id : myid } , up , true );
+
+ counts[myid] = ( counts[myid] ? counts[myid] : 0 ) + 1;
+ return myid;
+}
+
+for ( i=0; i<N*10; i++ ){
+ doUpdate( true )
+}
+db.getLastError();
+
+s.printChunks( "test.foo" )
+
+for ( var i=0; i<10; i++ ){
+ if ( check( "initial:" + i , true ) )
+ break;
+ sleep( 5000 )
+}
+check( "initial at end" )
+
+
+assert.lt( 20 , s.config.chunks.count() , "setup2" );
+
+function dist(){
+ var x = {}
+ s.config.chunks.find( { ns : "test.foo" } ).forEach(
+ function(z){
+ if ( x[z.shard] )
+ x[z.shard]++
+ else
+ x[z.shard] = 1;
+ }
+ );
+ return x;
+}
+
+function check( msg , dontAssert ){
+ for ( var x in counts ){
+ var e = counts[x];
+ var z = db.foo.findOne( { _id : parseInt( x ) } )
+
+ if ( z && z.x == e )
+ continue;
+
+ if ( dontAssert )
+ return false;
+
+ sleep( 10000 );
+
+ var y = db.foo.findOne( { _id : parseInt( x ) } )
+
+ if ( y ){
+ delete y.s;
+ }
+
+ assert( z , "couldn't find : " + x + " y:" + tojson(y) + " e: " + e + " " + msg )
+ assert.eq( e , z.x , "count for : " + x + " y:" + tojson(y) + " " + msg )
+ }
+
+ return true;
+}
+
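+// diff() doubles as the workload driver: each call issues one more upsert and
+// returns the current chunk-count skew between the two shards.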
+function diff(){
+ var myid = doUpdate( false )
+ var le = db.getLastErrorCmd();
+ if ( le.err )
+ print( "ELIOT ELIOT : " + tojson( le ) + "\t" + myid );
+
+ if ( Math.random() > .99 ){
+ db.getLastError()
+ check(); // SERVER-1430 TODO
+ }
+
+ var x = dist();
+ if ( Math.random() > .999 )
+ printjson( x )
+ return Math.max( x.shard0000 , x.shard0001 ) - Math.min( x.shard0000 , x.shard0001 );
+}
+
+function sum(){
+ var x = dist();
+ return x.shard0000 + x.shard0001;
+}
+
+assert.lt( 20 , diff() ,"initial load" );
+print( diff() )
+
+assert.soon( function(){
+
+ var d = diff();
+ return d < 5;
+} , "balance didn't happen" , 1000 * 60 * 3 , 1 );
+
+
+s.stop();
diff --git a/jstests/slowNightly/sharding_cursors1.js b/jstests/slowNightly/sharding_cursors1.js
new file mode 100644
index 0000000..307e8d7
--- /dev/null
+++ b/jstests/slowNightly/sharding_cursors1.js
@@ -0,0 +1,71 @@
+s = new ShardingTest( "cursors1" , 2 , 0 , 1 , { chunksize : 1 } )
+
+s.adminCommand( { enablesharding : "test" } );
+
+s.config.settings.find().forEach( printjson )
+
+db = s.getDB( "test" );
+
+bigString = "x"
+while (bigString.length < 1024)
+ bigString += bigString;
+assert.eq(bigString.length, 1024, 'len');
+
+s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
+
+toInsert = ( 1 * 1000 * 1000 );
+for (var i=0; i < toInsert; i++ ){
+ db.foo.insert( { i: i, r: Math.random(), s: bigString } );
+ assert.eq(db.getLastError(), null, 'no error'); //SERVER-1541
+}
+
+inserted = toInsert;
+for (var i=0; i < 10; i++ ){
+ //assert.gte(db.foo.count(), toInsert, 'inserted enough'); //sometimes fails
+ assert.gte(db.foo.count(), toInsert - 100, 'inserted enough');
+ inserted = Math.min(inserted, db.foo.count())
+ sleep (100);
+}
+
+print("\n\n\n **** inserted: " + inserted + '\n\n\n');
+
+/*
+
+var line = 0;
+try {
+ assert.gte(db.foo.find({}, {_id:1}).itcount(), inserted, 'itcount check - no sort - _id only');
+ line = 1;
+ assert.gte(db.foo.find({}, {_id:1}).sort({_id:1}).itcount(), inserted, 'itcount check - _id sort - _id only');
+ line = 2;
+
+ db.foo.ensureIndex({i:1});
+ db.foo.ensureIndex({r:1});
+ db.getLastError();
+ line = 3;
+
+ assert.gte(db.foo.find({}, {i:1}).sort({i:1}).itcount(), inserted, 'itcount check - i sort - i only');
+ line = 4;
+ assert.gte(db.foo.find({}, {_id:1}).sort({i:1}).itcount(), inserted, 'itcount check - i sort - _id only');
+ line = 5;
+
+ assert.gte(db.foo.find({}, {r:1}).sort({r:1}).itcount(), inserted, 'itcount check - r sort - r only');
+ line = 6;
+ assert.gte(db.foo.find({}, {_id:1}).sort({r:1}).itcount(), inserted, 'itcount check - r sort - _id only');
+ line = 7;
+
+ assert.gte(db.foo.find().itcount(), inserted, 'itcount check - no sort - full');
+ line = 8;
+ assert.gte(db.foo.find().sort({_id:1}).itcount(), inserted, 'itcount check - _id sort - full');
+ line = 9;
+ assert.gte(db.foo.find().sort({i:1}).itcount(), inserted, 'itcount check - i sort - full');
+ line = 10;
+ assert.gte(db.foo.find().sort({r:1}).itcount(), inserted, 'itcount check - r sort - full');
+ line = 11;
+} catch (e) {
+ print("***** finished through line " + line + " before exception");
+ throw e;
+}
+
+*/
+
+s.stop();
diff --git a/jstests/slowNightly/sharding_rs1.js b/jstests/slowNightly/sharding_rs1.js
new file mode 100644
index 0000000..3769e32
--- /dev/null
+++ b/jstests/slowNightly/sharding_rs1.js
@@ -0,0 +1,61 @@
+// tests sharding with replica sets
+
+s = new ShardingTest( "rs1" , 3 , 1 , 2 , { rs : true , chunksize : 1 } )
+
+s.adminCommand( { enablesharding : "test" } );
+
+s.config.settings.find().forEach( printjson )
+
+db = s.getDB( "test" );
+
+bigString = ""
+while ( bigString.length < 10000 )
+ bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
+
+inserted = 0;
+num = 0;
+while ( inserted < ( 20 * 1024 * 1024 ) ){
+ db.foo.insert( { _id : num++ , s : bigString } );
+ inserted += bigString.length;
+}
+
+db.getLastError();
+s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
+assert.lt( 20 , s.config.chunks.count() , "setup2" );
+
+function diff(){
+ var x = s.chunkCounts( "foo" );
+ var total = 0;
+ var min = 1000000000;
+ var max = 0;
+ for ( var sn in x ){
+ total += x[sn];
+ if ( x[sn] < min )
+ min = x[sn];
+ if ( x[sn] > max )
+ max = x[sn];
+ }
+
+ print( tojson(x) + " total: " + total + " min: " + min + " max: " + max )
+ return max - min;
+}
+
+assert.lt( 20 , diff() , "big differential here" );
+print( diff() )
+
+assert.soon( function(){
+ var d = diff();
+ return d < 5;
+} , "balance didn't happen" , 1000 * 60 * 3 , 5000 );
+
+
+for ( i=0; i<s._rs.length; i++ ){
+ r = s._rs[i];
+ r.test.awaitReplication();
+ x = r.test.getHashes( "test" );
+ print( r.url + "\t" + tojson( x ) )
+ for ( j=0; j<x.slaves.length; j++ )
+ assert.eq( x.master.md5 , x.slaves[j].md5 , "hashes same for: " + r.url + " slave: " + j );
+}
+
+s.stop()
diff --git a/jstests/slowWeekly/conc_update.js b/jstests/slowWeekly/conc_update.js
new file mode 100644
index 0000000..6094136
--- /dev/null
+++ b/jstests/slowWeekly/conc_update.js
@@ -0,0 +1,54 @@
+db = db.getSisterDB("concurrency")
+db.dropDatabase();
+
+NRECORDS=5*1024*1024 // this needs to be relatively big so that
+ // the update() will take a while, but it could
+ // probably be smaller.
+
+print("loading "+NRECORDS+" documents (progress msg every 1024*1024 documents)")
+for (i=0; i<(NRECORDS); i++) {
+ db.conc.insert({x:i})
+ if ((i%(1024*1024))==0)
+        print("loaded " + i/(1024*1024) + " mebi-records")
+}
+
+print("making an index (this will take a while)")
+db.conc.ensureIndex({x:1})
+
+var c1=db.conc.count({x:{$lt:NRECORDS}})
+// this is just a flag that the child will toggle when it's done.
+db.concflag.update({}, {inprog:true}, true)
+
+updater=startParallelShell("db=db.getSisterDB('concurrency');\
+ db.conc.update({}, {$inc:{x: "+NRECORDS+"}}, false, true);\
+ e=db.getLastError();\
+ print('update error: '+ e);\
+ db.concflag.update({},{inprog:false});\
+ assert.eq(e, null, \"update failed\");");
+
+querycount=0;
+decrements=0;
+misses=0
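+// While the background update (which adds NRECORDS to every x) is in progress,
+// the count of documents with x < NRECORDS should only ever shrink; track how
+// many polls see a decrease versus no change.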
+while (1) {
+ if (db.concflag.findOne().inprog) {
+ c2=db.conc.count({x:{$lt:NRECORDS}})
+ e=db.getLastError()
+ print(c2)
+ print(e)
+ assert.eq(e, null, "some count() failed")
+ querycount++;
+ if (c2<c1)
+ decrements++;
+ else
+ misses++;
+ c1 = c2;
+ } else
+ break;
+ sleep(10);
+}
+print(querycount + " queries, " + decrements + " decrements, " + misses + " misses");
+
+updater(); // wait for the parallel shell to finish
diff --git a/jstests/slow/indexbg1.js b/jstests/slowWeekly/indexbg1.js
index 5e34d44..5e34d44 100644
--- a/jstests/slow/indexbg1.js
+++ b/jstests/slowWeekly/indexbg1.js
diff --git a/jstests/slow/indexbg2.js b/jstests/slowWeekly/indexbg2.js
index 1830f42..1830f42 100644
--- a/jstests/slow/indexbg2.js
+++ b/jstests/slowWeekly/indexbg2.js
diff --git a/jstests/slow/ns1.js b/jstests/slowWeekly/ns1.js
index f51db01..f51db01 100644
--- a/jstests/slow/ns1.js
+++ b/jstests/slowWeekly/ns1.js
diff --git a/jstests/slowWeekly/query_yield1.js b/jstests/slowWeekly/query_yield1.js
new file mode 100644
index 0000000..e996b53
--- /dev/null
+++ b/jstests/slowWeekly/query_yield1.js
@@ -0,0 +1,76 @@
+
+t = db.query_yield1;
+t.drop()
+
+N = 10000;
+i = 0;
+
+q = function(){ var x=this.n; for ( var i=0; i<500; i++ ){ x = x * 2; } return false; }
+
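+// Calibrate: keep doubling N until a full collection scan with the JS
+// predicate takes over two seconds, so the query issued below runs long
+// enough for the concurrent writes to observe it yielding.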
+while ( true ){
+ function fill(){
+ for ( ; i<N; i++ ){
+ t.insert( { _id : i , n : 1 } )
+ }
+ }
+
+ function timeQuery(){
+ return Date.timeFunc(
+ function(){
+ assert.eq( 0 , t.find( q ).itcount() );
+ }
+ );
+
+ }
+
+ fill();
+ timeQuery();
+ timeQuery();
+ time = timeQuery();
+ print( N + "\t" + time );
+ if ( time > 2000 )
+ break;
+
+ N *= 2;
+}
+
+// --- test 1
+
+assert.eq( 0, db.currentOp().inprog.length , "setup broken" );
+
+join = startParallelShell( "print( 0 == db.query_yield1.find( function(){ var x=this.n; for ( var i=0; i<500; i++ ){ x = x * 2; } return false; } ).itcount() ); " )
+
+assert.soon(
+ function(){
+ var x = db.currentOp().inprog;
+ return x.length > 0;
+ } , "never doing query" , 2000 , 1
+);
+
+print( "start query" );
+
+num = 0;
+start = new Date();
+while ( ( (new Date()).getTime() - start ) < ( time * 2 ) ){
+ var me = Date.timeFunc( function(){ t.insert( { x : 1 } ); db.getLastError(); } )
+ var x = db.currentOp()
+
+ if ( num++ == 0 ){
+ assert.eq( 1 , x.inprog.length , "nothing in prog" );
+ }
+
+ assert.gt( 50 , me );
+
+ if ( x.inprog.length == 0 )
+ break;
+
+}
+
+join();
+
+var x = db.currentOp()
+assert.eq( 0 , x.inprog.length , "weird 2" );
+
diff --git a/jstests/slowWeekly/query_yield2.js b/jstests/slowWeekly/query_yield2.js
new file mode 100644
index 0000000..e13fabe
--- /dev/null
+++ b/jstests/slowWeekly/query_yield2.js
@@ -0,0 +1,74 @@
+
+t = db.query_yield2;
+t.drop()
+
+N = 100;
+i = 0;
+
+q = function(){ var x=this.n; for ( var i=0; i<50000; i++ ){ x = x * 2; } return false; }
+
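+// Same calibration as query_yield1, but with a much heavier predicate (50000 iterations) over far fewer documents.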
+while ( true ){
+ function fill(){
+ for ( ; i<N; i++ ){
+ t.insert( { _id : i , n : 1 } )
+ }
+ }
+
+ function timeQuery(){
+ return Date.timeFunc(
+ function(){
+ assert.eq( 0 , t.find( q ).itcount() );
+ }
+ );
+
+ }
+
+ fill();
+ timeQuery();
+ timeQuery();
+ time = timeQuery();
+ print( N + "\t" + time );
+ if ( time > 2000 )
+ break;
+
+ N *= 2;
+}
+
+// --- test 1
+
+assert.eq( 0, db.currentOp().inprog.length , "setup broken" );
+
+join = startParallelShell( "print( 0 == db.query_yield2.find( function(){ var x=this.n; for ( var i=0; i<50000; i++ ){ x = x * 2; } return false; } ).itcount() ); " )
+
+assert.soon(
+ function(){
+ var x = db.currentOp().inprog;
+ return x.length > 0;
+ } , "never doing query" , 2000 , 1
+);
+
+print( "start query" );
+
+num = 0;
+start = new Date();
+while ( ( (new Date()).getTime() - start ) < ( time * 2 ) ){
+ var me = Date.timeFunc( function(){ t.insert( { x : 1 } ); db.getLastError(); } )
+ var x = db.currentOp()
+
+ if ( num++ == 0 ){
+ assert.eq( 1 , x.inprog.length , "nothing in prog" );
+ }
+
+ assert.gt( 75 , me );
+
+ if ( x.inprog.length == 0 )
+ break;
+
+}
+
+join();
+
+var x = db.currentOp()
+assert.eq( 0 , x.inprog.length , "weird 2" );
+
diff --git a/jstests/slowWeekly/update_yield1.js b/jstests/slowWeekly/update_yield1.js
new file mode 100644
index 0000000..2e63690
--- /dev/null
+++ b/jstests/slowWeekly/update_yield1.js
@@ -0,0 +1,82 @@
+
+t = db.update_yield1;
+t.drop()
+
+N = 10000;
+i = 0;
+
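+// Calibrate N as in query_yield1, but timing a multi-document $inc update instead of a query.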
+while ( true ){
+ function fill(){
+ for ( ; i<N; i++ ){
+ t.insert( { _id : i , n : 1 } )
+ }
+ }
+
+ function timeUpdate(){
+ return Date.timeFunc(
+ function(){
+ t.update( {} , { $inc : { n : 1 } } , false , true );
+ var r = db.getLastErrorObj();
+ }
+ );
+
+ }
+
+ fill();
+ timeUpdate();
+ timeUpdate();
+ time = timeUpdate();
+ print( N + "\t" + time );
+ if ( time > 2000 )
+ break;
+
+ N *= 2;
+}
+
+// --- test 1
+
+join = startParallelShell( "db.update_yield1.update( {} , { $inc : { n : 1 } } , false , true ); db.getLastError()" );
+
+assert.soon(
+ function(){
+ return db.currentOp().inprog.length > 0;
+ } , "never doing update"
+);
+
+num = 0;
+start = new Date();
+while ( ( (new Date()).getTime() - start ) < ( time * 2 ) ){
+ var me = Date.timeFunc( function(){ t.findOne(); } );
+
+ if ( num++ == 0 ){
+ var x = db.currentOp()
+ assert.eq( 1 , x.inprog.length , "nothing in prog" );
+ }
+
+ assert.gt( 50 , me );
+}
+
+join();
+
+var x = db.currentOp()
+assert.eq( 0 , x.inprog.length , "weird 2" );
+
+// --- test 2
+
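+// With $atomic in the query, the multi-update should not yield: the findOne
+// below blocks until the update completes, so by the time it returns no
+// operation should remain in progress.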
+join = startParallelShell( "db.update_yield1.update( { $atomic : true } , { $inc : { n : 1 } } , false , true ); db.getLastError()" );
+
+assert.soon(
+ function(){
+ return db.currentOp().inprog.length > 0;
+ } , "never doing update 2"
+);
+
+t.findOne();
+var x = db.currentOp()
+assert.eq( 0 , x.inprog.length , "should have been atomic" );
+
+join();
diff --git a/jstests/splitvector.js b/jstests/splitvector.js
new file mode 100644
index 0000000..8d86319
--- /dev/null
+++ b/jstests/splitvector.js
@@ -0,0 +1,83 @@
+// -------------------------
+// SPLITVECTOR TEST UTILS
+// -------------------------
+
+// -------------------------
+// assertChunkSizes verifies that a given 'splitVec' divides the 'test.jstests_splitvector'
+// collection into chunks of approximately 'maxChunkSize'. Its asserts fail otherwise.
+// @param splitVec: an array with keys for field 'x'
+// e.g. [ { x : 1927 }, { x : 3855 }, ...
+// @param numDocs: domain of 'x' field
+// e.g. 20000
+// @param maxChunkSize is in MBs.
+//
+assertChunkSizes = function ( splitVec , numDocs , maxChunkSize ){
+ splitVec = [{ x: -1 }].concat( splitVec );
+ splitVec.push( { x: numDocs+1 } );
+ for ( i=0; i<splitVec.length-1; i++) {
+ min = splitVec[i];
+ max = splitVec[i+1];
+ size = db.runCommand( { datasize: "test.jstests_splitvector" , min: min , max: max } ).size;
+
+ // It is okay for the last chunk to be smaller. A collection's size does not
+ // need to be exactly a multiple of maxChunkSize.
+ if ( i < splitVec.length - 2 )
+ assert.close( maxChunkSize , size , "A"+i , -3 );
+ else
+ assert.gt( maxChunkSize, size, "A"+i );
+ }
+}
+
+
+// -------------------------
+// TESTS START HERE
+// -------------------------
+
+f = db.jstests_splitvector;
+f.drop();
+
+// -------------------------
+// Case: missing parameters
+
+assert.eq( false, db.runCommand( { splitVector: "test.jstests_splitvector" } ).ok );
+assert.eq( false, db.runCommand( { splitVector: "test.jstests_splitvector" , maxChunkSize: 1} ).ok );
+
+
+// -------------------------
+// Case: missing index
+
+assert.eq( false, db.runCommand( { splitVector: "test.jstests_splitvector" , keyPattern: {x:1} , maxChunkSize: 1 } ).ok );
+
+
+// -------------------------
+// Case: empty collection
+
+f.ensureIndex( { x: 1} );
+assert.eq( [], db.runCommand( { splitVector: "test.jstests_splitvector" , keyPattern: {x:1} , maxChunkSize: 1 } ).splitKeys );
+
+
+// -------------------------
+// Case: uniform collection
+
+f.drop();
+f.ensureIndex( { x: 1 } );
+
+// Get baseline document size
+filler = "";
+while( filler.length < 500 ) filler += "a";
+f.save( { x: 0, y: filler } );
+docSize = db.runCommand( { datasize: "test.jstests_splitvector" } ).size;
+assert.gt( docSize, 500 );
+
+// Fill collection and get split vector for 1MB maxChunkSize
+numDocs = 20000;
+for( i=1; i<numDocs; i++ ){
+ f.save( { x: i, y: filler } );
+}
+res = db.runCommand( { splitVector: "test.jstests_splitvector" , keyPattern: {x:1} , maxChunkSize: 1 } );
+
+assert.eq( true , res.ok );
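+// Sanity check: with a 1MB maxChunkSize we expect roughly one split key per
+// megabyte of data, i.e. about numDocs*docSize/1MB keys.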
+assert.close( numDocs*docSize / (1<<20) , res.splitKeys.length , "num split keys" , -1 );
+assertChunkSizes( res.splitKeys , numDocs, (1<<20) * 0.9 ); // splitVector cuts at 90% of maxChunkSize
diff --git a/jstests/tempCleanup.js b/jstests/tempCleanup.js
new file mode 100644
index 0000000..0a8a909
--- /dev/null
+++ b/jstests/tempCleanup.js
@@ -0,0 +1,16 @@
+
+mydb = db.getSisterDB( "temp_cleanup_test" )
+
+t = mydb.tempCleanup
+t.drop()
+
+t.insert( { x : 1 } )
+
+res = t.mapReduce( function(){ emit(1,1); } , function(){ return 1; } );
+printjson( res );
+
+assert.eq( 1 , t.count() , "A1" )
+assert.eq( 1 , mydb[res.result].count() , "A2" )
+
+mydb.dropDatabase()
+
diff --git a/jstests/tool/csv1.js b/jstests/tool/csv1.js
index edf9dc2..ccf1d09 100644
--- a/jstests/tool/csv1.js
+++ b/jstests/tool/csv1.js
@@ -4,25 +4,25 @@ t = new ToolTest( "csv1" )
c = t.startDB( "foo" );
-base = { a : 1 , b : "foo,bar\"baz,qux" , c: 5, 'd d': 6 };
+base = { a : 1 , b : "foo,bar\"baz,qux" , c: 5, 'd d': -6 , e: '-'};
assert.eq( 0 , c.count() , "setup1" );
c.insert( base );
delete base._id
assert.eq( 1 , c.count() , "setup2" );
-t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--csv" , "-f" , "a,b,c,d d" )
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--csv" , "-f" , "a,b,c,d d,e" )
c.drop()
assert.eq( 0 , c.count() , "after drop" )
-t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--type" , "csv" , "-f" , "a,b,c,d d" );
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--type" , "csv" , "-f" , "a,b,c,d d,e" );
assert.soon( "2 == c.count()" , "restore 2" );
a = c.find().sort( { a : 1 } ).toArray();
delete a[0]._id
delete a[1]._id
-assert.eq( tojson( { a : "a" , b : "b" , c : "c" , 'd d': "d d"} ) , tojson( a[1] ) , "csv parse 1" );
+assert.eq( tojson( { a : "a" , b : "b" , c : "c" , 'd d': "d d", e: 'e'} ) , tojson( a[1] ) , "csv parse 1" );
assert.eq( tojson( base ) , tojson(a[0]) , "csv parse 0" )
c.drop()
diff --git a/jstests/tool/exportimport1.js b/jstests/tool/exportimport1.js
index 22934fe..915adcd 100644
--- a/jstests/tool/exportimport1.js
+++ b/jstests/tool/exportimport1.js
@@ -17,4 +17,17 @@ assert.soon( "c.findOne()" , "no data after sleep" );
assert.eq( 1 , c.count() , "after restore 2" );
assert.eq( 22 , c.findOne().a , "after restore 2" );
+
+// now with --jsonArray
+
+t.runTool( "export" , "--jsonArray" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" );
+
+t.runTool( "import" , "--jsonArray" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( 1 , c.count() , "after restore 2" );
+assert.eq( 22 , c.findOne().a , "after restore 2" );
+
t.stop();
diff --git a/jstests/tool/files1.js b/jstests/tool/files1.js
new file mode 100644
index 0000000..acfcc16
--- /dev/null
+++ b/jstests/tool/files1.js
@@ -0,0 +1,27 @@
+// files1.js
+
+t = new ToolTest( "files1" )
+
+db = t.startDB();
+
+filename = 'mongod'
+if ( _isWindows() )
+ filename += '.exe'
+
+t.runTool( "files" , "-d" , t.baseName , "put" , filename );
+md5 = md5sumFile(filename);
+
+file_obj = db.fs.files.findOne()
+assert( file_obj , "A 0" );
+md5_stored = file_obj.md5;
+md5_computed = db.runCommand({filemd5: file_obj._id}).md5;
+assert.eq( md5 , md5_stored , "A 1" );
+assert.eq( md5 , md5_computed, "A 2" );
+
+mkdir(t.ext);
+
+t.runTool( "files" , "-d" , t.baseName , "get" , filename , '-l' , t.extFile );
+md5 = md5sumFile(t.extFile);
+assert.eq( md5 , md5_stored , "B" );
+
+t.stop()
diff --git a/jstests/update_addToSet2.js b/jstests/update_addToSet2.js
new file mode 100644
index 0000000..cb168f8
--- /dev/null
+++ b/jstests/update_addToSet2.js
@@ -0,0 +1,11 @@
+
+t = db.update_addToSet2
+t.drop();
+
+o = { _id : 1 }
+t.insert( o );
+
+t.update({},{$addToSet : {'kids' :{ 'name' : 'Bob', 'age': '4'}}})
+t.update({},{$addToSet : {'kids' :{ 'name' : 'Dan', 'age': '2'}}})
+
+printjson( t.findOne() );
diff --git a/jstests/update_arraymatch4.js b/jstests/update_arraymatch4.js
new file mode 100644
index 0000000..5abd0aa
--- /dev/null
+++ b/jstests/update_arraymatch4.js
@@ -0,0 +1,18 @@
+
+t = db.update_arraymatch4
+t.drop()
+
+x = { _id : 1 , arr : ["A1","B1","C1"] }
+t.insert( x )
+assert.eq( x , t.findOne() , "A1" )
+
+x.arr[0] = "A2"
+t.update( { arr : "A1" } , { $set : { "arr.$" : "A2" } } )
+assert.eq( x , t.findOne() , "A2" )
+
+t.ensureIndex( { arr : 1 } )
+x.arr[0] = "A3"
+t.update( { arr : "A2" } , { $set : { "arr.$" : "A3" } } )
+assert.eq( x , t.findOne() , "A3" ); // SERVER-1055
+
+
diff --git a/jstests/update_arraymatch5.js b/jstests/update_arraymatch5.js
new file mode 100644
index 0000000..aff1a03
--- /dev/null
+++ b/jstests/update_arraymatch5.js
@@ -0,0 +1,15 @@
+
+t = db.update_arraymatch5
+t.drop();
+
+t.insert({abc:{visible:true}, testarray:[{foobar_id:316, visible:true, xxx: 1}]});
+t.ensureIndex({'abc.visible':1, 'testarray.visible':1 , 'testarray.xxx': 1});
+assert( t.findOne({'abc.visible':true, testarray:{'$elemMatch': {visible:true, xxx:1}}}) , "A1" )
+assert( t.findOne({testarray:{'$elemMatch': {visible:true, xxx:1}}}) , "A2" );
+
+t.update({'testarray.foobar_id':316}, {'$set': {'testarray.$.visible': true, 'testarray.$.xxx': 2}}, false, true);
+
+assert( t.findOne() , "B1" );
+assert( t.findOne({testarray:{'$elemMatch': {visible:true, xxx:2}}}) , "B2" )
+assert( t.findOne({'abc.visible':true, testarray:{'$elemMatch': {visible:true, xxx:2}}}) , "B3" );
+assert.eq( 1 , t.find().count() , "B4" );
diff --git a/jstests/update_multi4.js b/jstests/update_multi4.js
new file mode 100644
index 0000000..e81a19a
--- /dev/null
+++ b/jstests/update_multi4.js
@@ -0,0 +1,18 @@
+
+t = db.update_multi4;
+t.drop();
+
+for(i=0;i<1000;i++){
+ t.insert( { _id:i ,
+ k:i%12,
+ v:"v"+i%12 } );
+}
+
+t.ensureIndex({k:1})
+
+assert.eq( 84 , t.count({k:2,v:"v2"} ) , "A0" );
+
+t.update({k:2},{$set:{v:"two v2"}},false,true)
+
+assert.eq( 0 , t.count({k:2,v:"v2"} ) , "A1" );
+assert.eq( 84 , t.count({k:2,v:"two v2"} ) , "A2" );
diff --git a/jstests/update_multi5.js b/jstests/update_multi5.js
new file mode 100644
index 0000000..46ef8f3
--- /dev/null
+++ b/jstests/update_multi5.js
@@ -0,0 +1,17 @@
+
+t = db.update_multi5;
+
+t.drop()
+
+t.insert({path: 'r1', subscribers: [1,2]});
+t.insert({path: 'r2', subscribers: [3,4]});
+
+t.update({}, {$addToSet: {subscribers: 5}}, false, true);
+
+t.find().forEach(
+ function(z){
+ assert.eq( 3 , z.subscribers.length , z );
+ }
+);
+
+
diff --git a/jstests/upsert1.js b/jstests/upsert1.js
new file mode 100644
index 0000000..77cbf57
--- /dev/null
+++ b/jstests/upsert1.js
@@ -0,0 +1,14 @@
+
+t = db.upsert1;
+t.drop();
+
+t.update( { x : 1 } , { $inc : { y : 1 } } , true );
+l = db.getLastErrorCmd();
+assert( l.upserted , "A1" );
+assert.eq( l.upserted.str , t.findOne()._id.str , "A2" );
+
+t.update( { x : 2 } , { x : 2 , y : 3 } , true );
+l = db.getLastErrorCmd();
+assert( l.upserted , "B1" );
+assert.eq( l.upserted.str , t.findOne( { x : 2 } )._id.str , "B2" );
+assert.eq( 2 , t.find().count() , "B3" );
diff --git a/jstests/where3.js b/jstests/where3.js
new file mode 100644
index 0000000..c062ed1
--- /dev/null
+++ b/jstests/where3.js
@@ -0,0 +1,10 @@
+
+t = db.where3;
+t.drop()
+
+t.save( { returned_date : 5 } );
+t.save( { returned_date : 6 } );
+
+assert.eq( 1 , t.find( function(){ return this.returned_date == 5; } ).count() , "A" );
+assert.eq( 1 , t.find( { $where : "return this.returned_date == 5;" } ).count() , "B" );
+assert.eq( 1 , t.find( { $where : "this.returned_date == 5;" } ).count() , "C" );