summaryrefslogtreecommitdiff
path: root/jstests
diff options
context:
space:
mode:
Diffstat (limited to 'jstests')
-rw-r--r--jstests/_tst.js41
-rw-r--r--jstests/apitest_db.js5
-rw-r--r--jstests/array4.js30
-rw-r--r--jstests/arrayfind3.js21
-rw-r--r--jstests/auth/auth1.js2
-rw-r--r--jstests/basic3.js32
-rw-r--r--jstests/big_object1.js46
-rw-r--r--jstests/capped3.js6
-rw-r--r--jstests/capped6.js25
-rw-r--r--jstests/capped7.js19
-rw-r--r--jstests/capped8.js86
-rw-r--r--jstests/check_shard_index.js45
-rw-r--r--jstests/conc_update.js45
-rw-r--r--jstests/coveredIndex1.js59
-rw-r--r--jstests/coveredIndex2.js18
-rw-r--r--jstests/cursora.js41
-rw-r--r--jstests/datasize3.js8
-rw-r--r--jstests/dbcase.js4
-rw-r--r--jstests/disk/directoryperdb.js4
-rw-r--r--jstests/disk/diskfull.js12
-rw-r--r--jstests/disk/killall.js42
-rw-r--r--jstests/disk/preallocate.js8
-rw-r--r--jstests/disk/preallocate2.js6
-rw-r--r--jstests/disk/preallocate_directoryperdb.js50
-rw-r--r--jstests/distinct1.js2
-rw-r--r--jstests/distinct_array1.js1
-rw-r--r--jstests/distinct_index1.js50
-rw-r--r--jstests/distinct_index2.js35
-rw-r--r--jstests/drop2.js43
-rw-r--r--jstests/drop_index.js (renamed from jstests/dropIndex.js)0
-rwxr-xr-xjstests/dur/a_quick.js123
-rw-r--r--jstests/dur/closeall.js80
-rw-r--r--jstests/dur/diskfull.js136
-rw-r--r--jstests/dur/dropdb.js163
-rwxr-xr-xjstests/dur/dur1.js154
-rw-r--r--jstests/dur/dur2.js92
-rwxr-xr-xjstests/dur/lsn.js126
-rwxr-xr-xjstests/dur/manyRestart.js191
-rw-r--r--jstests/dur/md5.js101
-rwxr-xr-xjstests/dur/oplog.js159
-rw-r--r--jstests/error5.js2
-rw-r--r--jstests/eval_nolock.js16
-rw-r--r--jstests/evalc.js14
-rw-r--r--jstests/evald.js68
-rw-r--r--jstests/evale.js5
-rw-r--r--jstests/evalf.js26
-rw-r--r--jstests/exists.js3
-rw-r--r--jstests/explain1.js2
-rw-r--r--jstests/explain2.js6
-rw-r--r--jstests/explain3.js24
-rw-r--r--jstests/find_and_modify3.js4
-rw-r--r--jstests/geo_borders.js189
-rw-r--r--jstests/geo_center_sphere1.js93
-rw-r--r--jstests/geo_circle2.js3
-rw-r--r--jstests/geo_circle2a.js36
-rw-r--r--jstests/geo_near_random1.js12
-rw-r--r--jstests/geo_near_random2.js21
-rw-r--r--jstests/geo_sort1.js22
-rw-r--r--jstests/geo_update1.js38
-rw-r--r--jstests/geo_update2.js40
-rw-r--r--jstests/geof.js19
-rw-r--r--jstests/group6.js1
-rw-r--r--jstests/in3.js2
-rw-r--r--jstests/in4.js4
-rw-r--r--jstests/index11.js13
-rw-r--r--jstests/index_check6.js45
-rw-r--r--jstests/index_check7.js2
-rw-r--r--jstests/index_many2.js2
-rw-r--r--jstests/index_sparse1.js46
-rw-r--r--jstests/index_sparse2.js21
-rw-r--r--jstests/indexh.js7
-rw-r--r--jstests/indexi.js16
-rw-r--r--jstests/indexj.js44
-rw-r--r--jstests/insert2.js8
-rw-r--r--jstests/jni2.js4
-rw-r--r--jstests/killop.js43
-rw-r--r--jstests/libs/concurrent.js30
-rw-r--r--jstests/libs/fun.js32
-rw-r--r--jstests/libs/geo_near_random.js78
-rw-r--r--jstests/libs/grid.js172
-rw-r--r--jstests/libs/network.js37
-rwxr-xr-xjstests/misc/biginsert.js18
-rw-r--r--jstests/mr1.js22
-rw-r--r--jstests/mr2.js27
-rw-r--r--jstests/mr3.js10
-rw-r--r--jstests/mr4.js4
-rw-r--r--jstests/mr5.js4
-rw-r--r--jstests/mr_bigobject.js13
-rw-r--r--jstests/mr_comments.js28
-rw-r--r--jstests/mr_errorhandling.js8
-rw-r--r--jstests/mr_index.js43
-rw-r--r--jstests/mr_index2.js22
-rw-r--r--jstests/mr_index3.js50
-rw-r--r--jstests/mr_killop.js127
-rw-r--r--jstests/mr_merge.js51
-rw-r--r--jstests/mr_optim.js47
-rw-r--r--jstests/mr_outreduce.js41
-rw-r--r--jstests/mr_outreduce2.js27
-rw-r--r--jstests/mr_replaceIntoDB.js45
-rw-r--r--jstests/mr_sort.js6
-rw-r--r--jstests/multiClient/rsDurKillRestart1.js139
-rw-r--r--jstests/ne2.js21
-rw-r--r--jstests/ne3.js12
-rw-r--r--jstests/not2.js5
-rw-r--r--jstests/notablescan.js22
-rw-r--r--jstests/objid5.js9
-rw-r--r--jstests/or4.js11
-rw-r--r--jstests/or6.js14
-rw-r--r--jstests/orc.js29
-rw-r--r--jstests/ord.js34
-rw-r--r--jstests/ore.js13
-rw-r--r--jstests/orf.js15
-rw-r--r--jstests/parallel/del.js79
-rw-r--r--jstests/parallel/repl.js4
-rw-r--r--jstests/perf/geo_near1.js11
-rw-r--r--jstests/profile1.js7
-rw-r--r--jstests/proj_key1.js28
-rw-r--r--jstests/pull_remove1.js14
-rw-r--r--jstests/push2.js2
-rw-r--r--jstests/queryoptimizer2.js62
-rw-r--r--jstests/regex3.js2
-rw-r--r--jstests/regex6.js4
-rw-r--r--jstests/regex9.js2
-rw-r--r--jstests/remove_undefined.js28
-rw-r--r--jstests/rename4.js121
-rw-r--r--jstests/repl/basic1.js4
-rw-r--r--jstests/repl/block2.js15
-rw-r--r--jstests/repl/mastermaster1.js8
-rw-r--r--jstests/repl/pair1.js4
-rw-r--r--jstests/repl/repl1.js2
-rw-r--r--jstests/repl/repl11.js4
-rw-r--r--jstests/repl/repl2.js2
-rw-r--r--jstests/repl/snapshot3.js4
-rw-r--r--jstests/replsets/auth1.js184
-rw-r--r--jstests/replsets/buildindexes.js86
-rw-r--r--jstests/replsets/cloneDb.js52
-rw-r--r--jstests/replsets/config1.js21
-rw-r--r--jstests/replsets/fastsync.js117
-rw-r--r--jstests/replsets/getlasterror_w2.js36
-rw-r--r--jstests/replsets/groupAndMapReduce.js105
-rw-r--r--jstests/replsets/initial_sync1.js129
-rw-r--r--jstests/replsets/initial_sync2.js179
-rw-r--r--jstests/replsets/initial_sync3.js87
-rw-r--r--jstests/replsets/ismaster1.js36
-rw-r--r--jstests/replsets/key11
-rw-r--r--jstests/replsets/key21
-rw-r--r--jstests/replsets/remove1.js132
-rw-r--r--jstests/replsets/replset2.js252
-rw-r--r--jstests/replsets/replset3.js130
-rw-r--r--jstests/replsets/replset5.js42
-rw-r--r--jstests/replsets/replset_remove_node.js9
-rw-r--r--jstests/replsets/replsetarb2.js2
-rw-r--r--jstests/replsets/replsetarb3.js144
-rw-r--r--jstests/replsets/replsetfreeze.js105
-rw-r--r--jstests/replsets/rollback.js333
-rw-r--r--jstests/replsets/rollback2.js423
-rwxr-xr-xjstests/replsets/rollback3.js39
-rw-r--r--jstests/replsets/rslib.js63
-rw-r--r--jstests/replsets/slaveDelay2.js104
-rw-r--r--jstests/replsets/slavedelay1.js127
-rw-r--r--jstests/replsets/sync1.js30
-rw-r--r--jstests/replsets/sync_passive.js89
-rw-r--r--jstests/replsets/sync_passive2.js120
-rw-r--r--jstests/replsets/toostale.js121
-rwxr-xr-xjstests/replsets/two_initsync.js1
-rw-r--r--jstests/replsets/twosets.js19
-rw-r--r--jstests/rs/rs_basic.js88
-rw-r--r--jstests/set_param1.js9
-rw-r--r--jstests/sharding/addshard3.js9
-rw-r--r--jstests/sharding/addshard4.js24
-rw-r--r--jstests/sharding/auto1.js5
-rw-r--r--jstests/sharding/bigMapReduce.js62
-rw-r--r--jstests/sharding/count1.js10
-rw-r--r--jstests/sharding/cursor1.js2
-rw-r--r--jstests/sharding/features1.js24
-rw-r--r--jstests/sharding/features2.js29
-rw-r--r--jstests/sharding/features3.js3
-rw-r--r--jstests/sharding/geo_near_random1.js37
-rw-r--r--jstests/sharding/geo_near_random2.js44
-rw-r--r--jstests/sharding/key_many.js6
-rw-r--r--jstests/sharding/key_string.js13
-rw-r--r--jstests/sharding/limit_push.js47
-rw-r--r--jstests/sharding/migrateBig.js45
-rw-r--r--jstests/sharding/multi_mongos1.js70
-rw-r--r--jstests/sharding/rename.js1
-rw-r--r--jstests/sharding/shard1.js1
-rw-r--r--jstests/sharding/shard3.js36
-rw-r--r--jstests/sharding/shard_insert_getlasterror_w2.js89
-rw-r--r--jstests/sharding/sort1.js46
-rw-r--r--jstests/sharding/splitpick.js39
-rw-r--r--jstests/sharding/sync1.js15
-rw-r--r--jstests/sharding/update1.js7
-rw-r--r--jstests/sharding/version1.js40
-rw-r--r--jstests/sharding/version2.js35
-rw-r--r--jstests/shellkillop.js83
-rw-r--r--jstests/shellspawn.js2
-rw-r--r--jstests/shellstartparallel.js17
-rwxr-xr-xjstests/slowNightly/32bit.js125
-rw-r--r--jstests/slowNightly/btreedel.js43
-rw-r--r--jstests/slowNightly/capped4.js2
-rw-r--r--jstests/slowNightly/command_line_parsing.js9
-rw-r--r--jstests/slowNightly/dur_big_atomic_update.js31
-rw-r--r--jstests/slowNightly/dur_passthrough.js89
-rw-r--r--jstests/slowNightly/dur_remove_old_journals.js53
-rw-r--r--jstests/slowNightly/geo_near_random1.js13
-rw-r--r--jstests/slowNightly/geo_near_random2.js21
-rw-r--r--jstests/slowNightly/index_check9.js118
-rw-r--r--jstests/slowNightly/large_chunk.js51
-rwxr-xr-xjstests/slowNightly/moveprimary-replset.js67
-rw-r--r--jstests/slowNightly/newcollection2.js11
-rw-r--r--jstests/slowNightly/sharding_balance1.js2
-rw-r--r--jstests/slowNightly/sharding_balance2.js2
-rw-r--r--jstests/slowNightly/sharding_balance3.js4
-rw-r--r--jstests/slowNightly/sharding_balance4.js43
-rw-r--r--jstests/slowNightly/sharding_balance_randomorder1.js54
-rw-r--r--jstests/slowNightly/sharding_cursors1.js6
-rw-r--r--jstests/slowNightly/sharding_multiple_collections.js53
-rw-r--r--jstests/slowNightly/sharding_passthrough.js (renamed from jstests/slowNightly/run_sharding_passthrough.js)6
-rw-r--r--jstests/slowNightly/sharding_rs1.js13
-rw-r--r--jstests/slowNightly/sharding_rs2.js163
-rw-r--r--jstests/slowNightly/unix_socket1.js26
-rw-r--r--jstests/slowWeekly/conc_update.js29
-rw-r--r--jstests/slowWeekly/disk_reuse1.js41
-rw-r--r--jstests/slowWeekly/dur_passthrough.js44
-rw-r--r--jstests/slowWeekly/geo_near_random1.js13
-rw-r--r--jstests/slowWeekly/geo_near_random2.js21
-rw-r--r--jstests/slowWeekly/indexbg_dur.js67
-rw-r--r--jstests/slowWeekly/query_yield1.js6
-rw-r--r--jstests/slowWeekly/query_yield2.js6
-rw-r--r--jstests/slowWeekly/update_yield1.js21
-rw-r--r--jstests/sort2.js2
-rw-r--r--jstests/splitvector.js144
-rw-r--r--jstests/temp_cleanup.js (renamed from jstests/tempCleanup.js)2
-rw-r--r--jstests/tool/dumprestore2.js3
-rw-r--r--jstests/tool/dumprestore3.js60
-rw-r--r--jstests/tool/dumprestore4.js42
-rw-r--r--jstests/tool/tool1.js2
-rw-r--r--jstests/ts1.js38
-rw-r--r--jstests/update_addToSet3.js18
-rw-r--r--jstests/update_arraymatch6.js14
-rw-r--r--jstests/update_multi6.js10
241 files changed, 9501 insertions, 1029 deletions
diff --git a/jstests/_tst.js b/jstests/_tst.js
new file mode 100644
index 0000000..f208164
--- /dev/null
+++ b/jstests/_tst.js
@@ -0,0 +1,41 @@
+/* a general testing framework (helpers) for us in the jstests/
+
+ to use, from your test file:
+ testname="mytestname";
+ load("jstests/_tst.js");
+*/
+
+if( typeof tst == "undefined" ) {
+ tst = {}
+
+ tst.log = function (optional_msg) {
+ print("\n\nstep " + ++this._step + " " + (optional_msg || ""));
+ }
+
+ tst.success = function () {
+ print(testname + " SUCCESS");
+ }
+
+ /* diff files a and b, returning the difference (empty str if no difference) */
+ tst.diff = function(a, b) {
+ function reSlash(s) {
+ var x = s;
+ if (_isWindows()) {
+ while (1) {
+ var y = x.replace('/', '\\');
+ if (y == x)
+ break;
+ x = y;
+ }
+ }
+ return x;
+ }
+ a = reSlash(a);
+ b = reSlash(b);
+ print("diff " + a + " " + b);
+ return run("diff", a, b);
+ }
+}
+
+print(testname + " BEGIN");
+tst._step = 0;
diff --git a/jstests/apitest_db.js b/jstests/apitest_db.js
index f54879c..c734d67 100644
--- a/jstests/apitest_db.js
+++ b/jstests/apitest_db.js
@@ -70,3 +70,8 @@ assert( asserted, "should have asserted" );
dd( "g" );
+
+
+assert.eq( "foo" , db.getSisterDB( "foo" ).getName() )
+assert.eq( "foo" , db.getSiblingDB( "foo" ).getName() )
+
diff --git a/jstests/array4.js b/jstests/array4.js
new file mode 100644
index 0000000..1053e16
--- /dev/null
+++ b/jstests/array4.js
@@ -0,0 +1,30 @@
+
+t = db.array4;
+t.drop();
+
+t.insert({"a": ["1", "2", "3"]});
+t.insert({"a" : ["2", "1"]});
+
+var x = {'a.0' : /1/};
+
+assert.eq(t.count(x), 1);
+
+assert.eq(t.findOne(x).a[0], 1);
+assert.eq(t.findOne(x).a[1], 2);
+
+t.drop();
+
+t.insert({"a" : {"0" : "1"}});
+t.insert({"a" : ["2", "1"]});
+
+assert.eq(t.count(x), 1);
+assert.eq(t.findOne(x).a[0], 1);
+
+t.drop();
+
+t.insert({"a" : ["0", "1", "2", "3", "4", "5", "6", "1", "1", "1", "2", "3", "2", "1"]});
+t.insert({"a" : ["2", "1"]});
+
+x = {"a.12" : /2/};
+assert.eq(t.count(x), 1);
+assert.eq(t.findOne(x).a[0], 0);
diff --git a/jstests/arrayfind3.js b/jstests/arrayfind3.js
new file mode 100644
index 0000000..60da713
--- /dev/null
+++ b/jstests/arrayfind3.js
@@ -0,0 +1,21 @@
+
+t = db.arrayfind3;
+t.drop()
+
+t.save({a:[1,2]})
+t.save({a:[1, 2, 6]})
+t.save({a:[1, 4, 6]})
+
+
+assert.eq( 2 , t.find( {a:{$gte:3, $lte: 5}} ).itcount() , "A1" )
+assert.eq( 1 , t.find( {a:{$elemMatch:{$gte:3, $lte: 5}}} ).itcount() , "A2" )
+
+t.ensureIndex( { a : 1 } )
+
+printjson( t.find( {a:{$gte:3, $lte: 5}} ).explain() );
+
+//assert.eq( 2 , t.find( {a:{$gte:3, $lte: 5}} ).itcount() , "B1" ); // SERVER-1264
+assert.eq( 1 , t.find( {a:{$elemMatch:{$gte:3, $lte: 5}}} ).itcount() , "B2" )
+
+
+
diff --git a/jstests/auth/auth1.js b/jstests/auth/auth1.js
index 6fc6dc5..2f2a1b4 100644
--- a/jstests/auth/auth1.js
+++ b/jstests/auth/auth1.js
@@ -68,6 +68,6 @@ if ( db.runCommand( "features" ).readlock ){
initial: { count: 0 }
};
- assert.throws( function() { return t.group( p ) }, "write reduce didn't fail" );
+ assert.throws( function() { return t.group( p ) }, null , "write reduce didn't fail" );
}
diff --git a/jstests/basic3.js b/jstests/basic3.js
index 2deee2b..4488865 100644
--- a/jstests/basic3.js
+++ b/jstests/basic3.js
@@ -3,14 +3,13 @@ t = db.getCollection( "foo_basic3" );
t.find( { "a.b" : 1 } ).toArray();
-ok = false;
+ok = true;
try{
t.save( { "a.b" : 5 } );
ok = false;
}
catch ( e ){
- ok = true;
}
assert( ok , ". in names aren't allowed doesn't work" );
@@ -19,6 +18,33 @@ try{
ok = false;
}
catch ( e ){
- ok = true;
}
assert( ok , ". in embedded names aren't allowed doesn't work" );
+
+// following tests make sure update keys are checked
+t.save({"a": 0,"b": 1})
+try {
+ t.update({"a": 0}, {"b.b": 1});
+ ok = false;
+} catch (e) {}
+assert( ok , "must deny '.' in key of update" );
+
+// upsert with embedded doc
+try {
+ t.update({"a": 10}, {"b": { "c.c" : 1 }}, true);
+ ok = false;
+} catch (e) {}
+assert( ok , "must deny '.' in key of update" );
+
+// if it is a modifier, it should still go through
+t.update({"a": 0}, {$set: { "c.c": 1}})
+t.update({"a": 0}, {$inc: { "c.c": 1}})
+
+// edge cases
+try {
+ t.update({"a": 0}, {"": { "c.c": 1}})
+ ok = false;
+} catch (e) {}
+assert( ok , "must deny '.' in key of update" );
+t.update({"a": 0}, {})
+
diff --git a/jstests/big_object1.js b/jstests/big_object1.js
new file mode 100644
index 0000000..be841e0
--- /dev/null
+++ b/jstests/big_object1.js
@@ -0,0 +1,46 @@
+
+t = db.big_object1
+t.drop();
+
+if ( db.adminCommand( "buildinfo" ).bits == 64 ){
+
+ s = ""
+ while ( s.length < 850 * 1024 ){
+ s += "x";
+ }
+
+ x = 0;
+ while ( true ){
+ n = { _id : x , a : [] }
+ for ( i=0; i<14+x; i++ )
+ n.a.push( s )
+ try {
+ t.insert( n )
+ o = n
+ }
+ catch ( e ){
+ break;
+ }
+
+ if ( db.getLastError() != null )
+ break;
+ x++;
+ }
+
+ printjson( t.stats(1024*1024) )
+
+ assert.lt( 15 * 1024 * 1024 , Object.bsonsize( o ) , "A1" )
+ assert.gt( 17 * 1024 * 1024 , Object.bsonsize( o ) , "A2" )
+
+ assert.eq( x , t.count() , "A3" )
+
+ for ( i=0; i<x; i++ ){
+ o = t.findOne( { _id : 1 } )
+ assert( o , "B" + i );
+ }
+
+ t.drop()
+}
+else {
+ print( "skipping big_object1 b/c not 64-bit" )
+}
diff --git a/jstests/capped3.js b/jstests/capped3.js
index d6d2b23..2e5e679 100644
--- a/jstests/capped3.js
+++ b/jstests/capped3.js
@@ -23,9 +23,9 @@ c = t2.find().sort( {$natural:-1} );
i = 999;
while( c.hasNext() ) {
assert.eq( i--, c.next().i, "E" );
-}
-//print( "i: " + i );
-var str = tojson( t2.stats() );
+}
+//print( "i: " + i );
+var str = tojson( t2.stats() );
//print( "stats: " + tojson( t2.stats() ) );
assert( i < 990, "F" );
diff --git a/jstests/capped6.js b/jstests/capped6.js
index 851bbd1..6579807 100644
--- a/jstests/capped6.js
+++ b/jstests/capped6.js
@@ -1,3 +1,5 @@
+// Test NamespaceDetails::cappedTruncateAfter via 'captrunc' command
+
Random.setRandomSeed();
db.capped6.drop();
@@ -8,6 +10,12 @@ function debug( x ) {
// print( x );
}
+/**
+ * Check that documents in the collection are in order according to the value
+ * of a, which corresponds to the insert order. This is a check that the oldest
+ * document(s) is/are deleted when space is needed for the newest document. The
+ * check is performed in both forward and reverse directions.
+ */
function checkOrder( i ) {
res = tzz.find().sort( { $natural: -1 } );
assert( res.hasNext(), "A" );
@@ -30,12 +38,18 @@ function checkOrder( i ) {
var val = new Array( 500 );
var c = "";
for( i = 0; i < 500; ++i, c += "-" ) {
+ // The a values are strings of increasing length.
val[ i ] = { a: c };
}
var oldMax = Random.randInt( 500 );
var max = 0;
+/**
+ * Insert new documents until there are 'oldMax' documents in the collection,
+ * then remove a random number of documents (often all but one) via one or more
+ * 'captrunc' requests.
+ */
function doTest() {
for( var i = max; i < oldMax; ++i ) {
tzz.save( val[ i ] );
@@ -48,7 +62,13 @@ function doTest() {
min = Random.randInt( count ) + 1;
}
+ // Iteratively remove a random number of documents until we have no more
+ // than 'min' documents.
while( count > min ) {
+ // 'n' is the number of documents to remove - we must account for the
+ // possibility that 'inc' will be true, and avoid removing all documents
+ // from the collection in that case, as removing all documents is not
+ // allowed by 'captrunc'
var n = Random.randInt( count - min - 1 ); // 0 <= x <= count - min - 1
var inc = Random.rand() > 0.5;
debug( count + " " + n + " " + inc );
@@ -58,10 +78,13 @@ function doTest() {
}
count -= n;
max -= n;
+ // Validate the remaining documents.
checkOrder( max - 1 );
}
}
+// Repeatedly add up to 'oldMax' documents and then truncate the newest
+// documents. Newer documents take up more space than older documents.
for( var i = 0; i < 10; ++i ) {
doTest();
}
@@ -77,6 +100,8 @@ db.capped6.drop();
db._dbCommand( { create: "capped6", capped: true, size: 1000, $nExtents: 11, autoIndexId: false } );
tzz = db.capped6;
+// Same test as above, but now the newer documents take less space than the
+// older documents instead of more.
for( var i = 0; i < 10; ++i ) {
doTest();
}
diff --git a/jstests/capped7.js b/jstests/capped7.js
index ecb689e..693828d 100644
--- a/jstests/capped7.js
+++ b/jstests/capped7.js
@@ -1,3 +1,5 @@
+// Test NamespaceDetails::emptyCappedCollection via 'emptycapped' command
+
Random.setRandomSeed();
db.capped7.drop();
@@ -8,6 +10,10 @@ var ten = new Array( 11 ).toString().replace( /,/g, "-" );
count = 0;
+/**
+ * Insert new documents until the capped collection loops and the document
+ * count doesn't increase on insert.
+ */
function insertUntilFull() {
count = tzz.count();
var j = 0;
@@ -23,21 +29,27 @@ while( 1 ) {
insertUntilFull();
+// oldCount == count before empty
oldCount = count;
assert.eq.automsg( "11", "tzz.stats().numExtents" );
+
+// oldSize == size before empty
var oldSize = tzz.stats().storageSize;
assert.commandWorked( db._dbCommand( { emptycapped: "capped7" } ) );
+// check that collection storage parameters are the same after empty
assert.eq.automsg( "11", "tzz.stats().numExtents" );
assert.eq.automsg( "oldSize", "tzz.stats().storageSize" );
+// check that the collection is empty after empty
assert.eq.automsg( "0", "tzz.find().itcount()" );
assert.eq.automsg( "0", "tzz.count()" );
+// check that we can reuse the empty collection, inserting as many documents
+// as we were able to the first time through.
insertUntilFull();
-
assert.eq.automsg( "oldCount", "count" );
assert.eq.automsg( "oldCount", "tzz.find().itcount()" );
assert.eq.automsg( "oldCount", "tzz.count()" );
@@ -47,12 +59,16 @@ var oldSize = tzz.stats().storageSize;
assert.commandWorked( db._dbCommand( { emptycapped: "capped7" } ) );
+// check that the collection storage parameters are unchanged after another empty
assert.eq.automsg( "11", "tzz.stats().numExtents" );
assert.eq.automsg( "oldSize", "tzz.stats().storageSize" );
+// insert an arbitrary number of documents
var total = Random.randInt( 2000 );
for( var j = 1; j <= total; ++j ) {
tzz.save( {i:ten,j:j} );
+ // occasionally check that only the oldest documents are removed to make room
+ // for the newest documents
if ( Random.rand() > 0.95 ) {
assert.automsg( "j >= tzz.count()" );
assert.eq.automsg( "tzz.count()", "tzz.find().itcount()" );
@@ -62,6 +78,7 @@ for( var j = 1; j <= total; ++j ) {
while( c.hasNext() ) {
assert.eq.automsg( "c.next().j", "k--" );
}
+ // check the same thing with a reverse iterator as well
var c = tzz.find().sort( {$natural:1} );
assert.automsg( "c.hasNext()" );
while( c.hasNext() ) {
diff --git a/jstests/capped8.js b/jstests/capped8.js
new file mode 100644
index 0000000..cce0eec
--- /dev/null
+++ b/jstests/capped8.js
@@ -0,0 +1,86 @@
+// Test NamespaceDetails::cappedTruncateAfter with empty extents
+
+Random.setRandomSeed();
+
+t = db.jstests_capped8;
+
+function debug( x ) {
+// printjson( x );
+}
+
+/** Generate an object with a string field of specified length */
+function obj( size ) {
+ return {a:new Array( size + 1 ).toString()};;
+}
+
+function withinOne( a, b ) {
+ assert( Math.abs( a - b ) <= 1, "not within one: " + a + ", " + b )
+}
+
+/**
+ * Insert enough documents of the given size spec that the collection will
+ * contain only documents having this size spec.
+ */
+function insertMany( size ) {
+ // Add some variability, as the precise number can trigger different cases.
+ n = 250 + Random.randInt( 10 );
+ for( i = 0; i < n; ++i ) {
+ t.save( obj( size ) );
+ debug( t.count() );
+ }
+}
+
+/**
+ * Insert some documents in such a way that there may be an empty extent, then
+ * truncate the capped collection.
+ */
+function insertAndTruncate( first ) {
+ myInitialCount = t.count();
+ // Insert enough documents to make the capped allocation loop over.
+ insertMany( 50 );
+ myFiftyCount = t.count();
+ // Insert documents that are too big to fit in the smaller extents.
+ insertMany( 2000 );
+ myTwokCount = t.count();
+ if ( first ) {
+ initialCount = myInitialCount;
+ fiftyCount = myFiftyCount;
+ twokCount = myTwokCount;
+ // Sanity checks for collection count
+ assert( fiftyCount > initialCount );
+ assert( fiftyCount > twokCount );
+ } else {
+ // Check that we are able to insert roughly the same number of documents
+ // after truncating. The exact values are slightly variable as a result
+ // of the capped allocation algorithm.
+ withinOne( initialCount, myInitialCount );
+ withinOne( fiftyCount, myFiftyCount );
+ withinOne( twokCount, myTwokCount );
+ }
+ count = t.count();
+ // Check that we can truncate the collection successfully.
+ assert.commandWorked( db.runCommand( { captrunc:"jstests_capped8", n:count - 1, inc:false } ) );
+}
+
+/** Test truncating and subsequent inserts */
+function testTruncate() {
+ insertAndTruncate( true );
+ insertAndTruncate( false );
+ insertAndTruncate( false );
+}
+
+t.drop();
+db._dbCommand( { create:"jstests_capped8", capped: true, $nExtents: [ 10000, 10000, 1000 ] } );
+testTruncate();
+
+t.drop();
+db._dbCommand( { create:"jstests_capped8", capped: true, $nExtents: [ 10000, 1000, 1000 ] } );
+testTruncate();
+
+t.drop();
+db._dbCommand( { create:"jstests_capped8", capped: true, $nExtents: [ 10000, 1000 ] } );
+testTruncate();
+
+t.drop();
+db._dbCommand( { create:"jstests_capped8", capped: true, $nExtents: [ 10000 ] } );
+testTruncate();
diff --git a/jstests/check_shard_index.js b/jstests/check_shard_index.js
new file mode 100644
index 0000000..a5a1fc1
--- /dev/null
+++ b/jstests/check_shard_index.js
@@ -0,0 +1,45 @@
+// -------------------------
+// CHECKSHARDINGINDEX TEST UTILS
+// -------------------------
+
+f = db.jstests_shardingindex;
+f.drop();
+
+
+// -------------------------
+// Case 1: all entries filled or empty should make a valid index
+//
+
+f.drop();
+f.ensureIndex( { x: 1 , y: 1 } );
+assert.eq( 0 , f.count() , "1. initial count should be zero" );
+
+res = db.runCommand( { checkShardingIndex: "test.jstests_shardingindex" , keyPattern: {x:1, y:1} , force: true });
+assert.eq( true , res.ok, "1a" );
+
+f.save( { x: 1 , y : 1 } );
+assert.eq( 1 , f.count() , "1. count after initial insert should be 1" );
+res = db.runCommand( { checkShardingIndex: "test.jstests_shardingindex" , keyPattern: {x:1, y:1} , force: true });
+assert.eq( true , res.ok , "1b" );
+
+
+// -------------------------
+// Case 2: entry with null values would make an index unsuitable
+//
+
+f.drop();
+f.ensureIndex( { x: 1 , y: 1 } );
+assert.eq( 0 , f.count() , "2. initial count should be zero" );
+
+f.save( { x: 1 , y : 1 } );
+f.save( { x: null , y : 1 } );
+
+res = db.runCommand( { checkShardingIndex: "test.jstests_shardingindex" , keyPattern: {x:1, y:1} , force: true });
+assert.eq( true , res.ok , "2a " + tojson(res) );
+
+f.save( { y: 2 } );
+assert.eq( 3 , f.count() , "2. count after initial insert should be 3" );
+res = db.runCommand( { checkShardingIndex: "test.jstests_shardingindex" , keyPattern: {x:1, y:1} , force: true });
+assert.eq( false , res.ok , "2b " + tojson(res) );
+
+print("PASSED");
diff --git a/jstests/conc_update.js b/jstests/conc_update.js
deleted file mode 100644
index ac70861..0000000
--- a/jstests/conc_update.js
+++ /dev/null
@@ -1,45 +0,0 @@
-// db = db.getSisterDB("concurrency")
-// db.dropDatabase();
-//
-// NRECORDS=10*1024*1024 // this needs to be relatively big so that
-// // the update() will take a while.
-//
-// print("loading data (will take a while; progress msg every 1024*1024 documents)")
-// for (i=0; i<(10*1024*1024); i++) {
-// db.conc.insert({x:i})
-// if ((i%(1024*1024))==0)
-// print("loaded " + i/(1024*1024) + " mibi-records")
-// }
-//
-// print("making an index (will take a while)")
-// db.conc.ensureIndex({x:1})
-//
-// var c1=db.conc.count({x:{$lt:NRECORDS}})
-// // this is just a flag that the child will toggle when it's done.
-// db.concflag.update({}, {inprog:true}, true)
-//
-// updater=startParallelShell("db=db.getSisterDB('concurrency');\
-// db.conc.update({}, {$inc:{x: "+NRECORDS+"}}, false, true);\
-// print(db.getLastError());\
-// db.concflag.update({},{inprog:false})");
-//
-// querycount=0;
-// decrements=0;
-// misses=0
-// while (1) {
-// if (db.concflag.findOne().inprog) {
-// c2=db.conc.count({x:{$lt:10*1024*1024}})
-// print(c2)
-// querycount++;
-// if (c2<c1)
-// decrements++;
-// else
-// misses++;
-// c1 = c2;
-// } else
-// break;
-// sleep(10);
-// }
-// print(querycount + " queries, " + decrements + " decrements, " + misses + " misses");
-//
-// updater() // wait()
diff --git a/jstests/coveredIndex1.js b/jstests/coveredIndex1.js
new file mode 100644
index 0000000..1d6fe36
--- /dev/null
+++ b/jstests/coveredIndex1.js
@@ -0,0 +1,59 @@
+
+t = db["jstests_coveredIndex1"];
+t.drop();
+
+t.save({fn: "john", ln: "doe"})
+t.save({fn: "jack", ln: "doe"})
+t.save({fn: "john", ln: "smith"})
+t.save({fn: "jack", ln: "black"})
+t.save({fn: "bob", ln: "murray"})
+t.save({fn: "aaa", ln: "bbb", obj: {a: 1, b: "blah"}})
+assert.eq( t.findOne({ln: "doe"}).fn, "john", "Cannot find right record" );
+assert.eq( t.count(), 6, "Not right length" );
+
+// use simple index
+t.ensureIndex({ln: 1});
+assert.eq( t.find({ln: "doe"}).explain().indexOnly, false, "Find using covered index but all fields are returned");
+assert.eq( t.find({ln: "doe"}, {ln: 1}).explain().indexOnly, false, "Find using covered index but _id is returned");
+assert.eq( t.find({ln: "doe"}, {ln: 1, _id: 0}).explain().indexOnly, true, "Find is not using covered index");
+
+// use compound index
+t.dropIndex({ln: 1})
+t.ensureIndex({ln: 1, fn: 1});
+// return 1 field
+assert.eq( t.find({ln: "doe"}, {ln: 1, _id: 0}).explain().indexOnly, true, "Find is not using covered index");
+// return both fields, multiple docs returned
+assert.eq( t.find({ln: "doe"}, {ln: 1, fn: 1, _id: 0}).explain().indexOnly, true, "Find is not using covered index");
+// match 1 record using both fields
+assert.eq( t.find({ln: "doe", fn: "john"}, {ln: 1, fn: 1, _id: 0}).explain().indexOnly, true, "Find is not using covered index");
+// change ordering
+assert.eq( t.find({fn: "john", ln: "doe"}, {fn: 1, ln: 1, _id: 0}).explain().indexOnly, true, "Find is not using covered index");
+// ask from 2nd index key
+assert.eq( t.find({fn: "john"}, {fn: 1, _id: 0}).explain().indexOnly, false, "Find is using covered index, but doesnt have 1st key");
+
+// repeat above but with _id field
+t.dropIndex({ln: 1, fn: 1})
+t.ensureIndex({_id: 1, ln: 1});
+// return 1 field
+assert.eq( t.find({_id: 123}, {_id: 1}).explain().indexOnly, true, "Find is not using covered index");
+// match 1 record using both fields
+assert.eq( t.find({_id: 123, ln: "doe"}, {ln: 1}).explain().indexOnly, true, "Find is not using covered index");
+// change ordering
+assert.eq( t.find({ln: "doe", _id: 123}, {ln: 1, _id: 1}).explain().indexOnly, true, "Find is not using covered index");
+// ask from 2nd index key
+assert.eq( t.find({ln: "doe"}, {ln: 1}).explain().indexOnly, false, "Find is using covered index, but doesnt have 1st key");
+
+// repeat above but with embedded obj
+t.dropIndex({_id: 1, ln: 1})
+t.ensureIndex({obj: 1});
+assert.eq( t.find({"obj.a": 1}, {obj: 1}).explain().indexOnly, false, "Shouldnt use index when introspecting object");
+assert.eq( t.find({obj: {a: 1, b: "blah"}}).explain().indexOnly, false, "Index doesnt have all fields to cover");
+assert.eq( t.find({obj: {a: 1, b: "blah"}}, {obj: 1, _id: 0}).explain().indexOnly, true, "Find is not using covered index");
+
+// repeat above but with index on sub obj field
+t.dropIndex({obj: 1});
+t.ensureIndex({"obj.a": 1, "obj.b": 1})
+assert.eq( t.find({"obj.a": 1}, {obj: 1}).explain().indexOnly, false, "Shouldnt use index when introspecting object");
+
+assert(t.validate().valid);
+
diff --git a/jstests/coveredIndex2.js b/jstests/coveredIndex2.js
new file mode 100644
index 0000000..f01c0eb
--- /dev/null
+++ b/jstests/coveredIndex2.js
@@ -0,0 +1,18 @@
+t = db["jstests_coveredIndex2"];
+t.drop();
+
+t.save({a: 1})
+t.save({a: 2})
+assert.eq( t.findOne({a: 1}).a, 1, "Cannot find right record" );
+assert.eq( t.count(), 2, "Not right length" );
+
+// use simple index
+t.ensureIndex({a: 1});
+assert.eq( t.find({a:1}).explain().indexOnly, false, "Find using covered index but all fields are returned");
+assert.eq( t.find({a:1}, {a: 1}).explain().indexOnly, false, "Find using covered index but _id is returned");
+assert.eq( t.find({a:1}, {a: 1, _id: 0}).explain().indexOnly, true, "Find is not using covered index");
+
+// add multikey
+t.save({a:[3,4]})
+assert.eq( t.find({a:1}, {ln: 1, _id: 0}).explain().indexOnly, false, "Find is using covered index even after multikey insert");
+
diff --git a/jstests/cursora.js b/jstests/cursora.js
index 0916fa7..a46688a 100644
--- a/jstests/cursora.js
+++ b/jstests/cursora.js
@@ -1,34 +1,47 @@
-
t = db.cursora
-
-
function run( n , atomic ){
-
+ if( !isNumber(n) ) {
+ print("n:");
+ printjson(n);
+ assert(isNumber(n), "cursora.js isNumber");
+ }
t.drop()
for ( i=0; i<n; i++ )
t.insert( { _id : i } )
db.getLastError()
+ print("cursora.js startParallelShell n:"+n+" atomic:"+atomic)
join = startParallelShell( "sleep(50); db.cursora.remove( {" + ( atomic ? "$atomic:true" : "" ) + "} ); db.getLastError();" );
-
- start = new Date()
- num = t.find( function(){ num = 2; for ( var x=0; x<1000; x++ ) num += 2; return num > 0; } ).sort( { _id : -1 } ).limit(n).itcount()
- end = new Date()
+ var start = null;
+ var ex = null;
+ var num = null;
+ var end = null;
+ try {
+ start = new Date()
+ ex = t.find(function () { num = 2; for (var x = 0; x < 1000; x++) num += 2; return num > 0; }).sort({ _id: -1 }).explain()
+ num = ex.n
+ end = new Date()
+ }
+ catch (e) {
+ print("cursora.js FAIL " + e);
+ join();
+ throw e;
+ }
+
join()
- print( "num: " + num + " time:" + ( end.getTime() - start.getTime() ) )
- assert.eq( 0 , t.count() , "after remove" )
+ //print( "cursora.js num: " + num + " time:" + ( end.getTime() - start.getTime() ) )
+ assert.eq( 0 , t.count() , "after remove: " + tojson( ex ) )
+ // assert.lt( 0 , ex.nYields , "not enough yields : " + tojson( ex ) ); // TODO make this more reliable so cen re-enable assert
if ( n == num )
- print( "warning: shouldn't have counted all n: " + n + " num: " + num );
+ print( "cursora.js warning: shouldn't have counted all n: " + n + " num: " + num );
}
run( 1500 )
run( 5000 )
-
run( 1500 , true )
run( 5000 , true )
-
-
+print("cursora.js SUCCESS")
diff --git a/jstests/datasize3.js b/jstests/datasize3.js
index d45f34b..df79e6d 100644
--- a/jstests/datasize3.js
+++ b/jstests/datasize3.js
@@ -22,10 +22,12 @@ t.ensureIndex( { x : 1 } )
for ( i=2; i<100; i++ )
t.insert( { x : i } )
-a = run( { min : { x : 20 } , max : { x : 50 } } )
-b = run( { min : { x : 20 } , max : { x : 50 } , estimate : true } )
+a = run( { min : { x : 20 } , max : { x : 50 } } ).size
+b = run( { min : { x : 20 } , max : { x : 50 } , estimate : true } ).size
-assert.eq( a.size , b.size );
+ratio = Math.min( a , b ) / Math.max( a , b );
+
+assert.lt( 0.97 , ratio , "sizes not equal a: " + a + " b: " + b );
diff --git a/jstests/dbcase.js b/jstests/dbcase.js
index d76b739..21854d8 100644
--- a/jstests/dbcase.js
+++ b/jstests/dbcase.js
@@ -1,4 +1,6 @@
+/*
+TODO SERVER-2111
a = db.getSisterDB( "dbcasetest_dbnamea" )
b = db.getSisterDB( "dbcasetest_dbnameA" )
@@ -19,5 +21,5 @@ a.dropDatabase();
b.dropDatabase();
print( db.getMongo().getDBNames() )
-
+*/
diff --git a/jstests/disk/directoryperdb.js b/jstests/disk/directoryperdb.js
index 90a1f03..3b65bd0 100644
--- a/jstests/disk/directoryperdb.js
+++ b/jstests/disk/directoryperdb.js
@@ -9,7 +9,7 @@ db[ baseName ].save( {} );
assert.eq( 1, db[ baseName ].count() , "A : " + tojson( db[baseName].find().toArray() ) );
checkDir = function( dir ) {
- db.runCommand( {fsync:1} );
+ db.adminCommand( {fsync:1} );
files = listFiles( dir );
found = false;
for( f in files ) {
@@ -60,3 +60,5 @@ assert( m.getDBs().totalSize > 0, "bad size calc" );
db.dropDatabase();
files = listFiles( dbpath );
files.forEach( function( f ) { assert( !new RegExp( baseName ).test( f.name ), "drop database - dir not cleared" ); } );
+
+print("SUCCESS directoryperdb.js");
diff --git a/jstests/disk/diskfull.js b/jstests/disk/diskfull.js
index 6cbcbb7..26b707d 100644
--- a/jstests/disk/diskfull.js
+++ b/jstests/disk/diskfull.js
@@ -1,19 +1,25 @@
doIt = false;
+dbpath = "/data/db/diskfulltest";
+
files = listFiles( "/data/db" );
for ( i in files ) {
- if ( files[ i ].name == "/data/db/diskfulltest" ) {
+ if ( files[ i ].name == dbpath ) {
doIt = true;
}
}
if ( !doIt ) {
- print( "path /data/db/diskfulltest/ missing, skipping diskfull test" );
+ print( "path " + dbpath + " missing, skipping diskfull test" );
doIt = false;
}
if ( doIt ) {
+ // Clear dbpath without removing and recreating diskfulltest directory, as resetDbpath does
+ files = listFiles( dbpath );
+ files.forEach( function( x ) { removeFile( x.name ) } );
+
port = allocatePorts( 1 )[ 0 ];
- m = startMongoProgram( "mongod", "--port", port, "--dbpath", "/data/db/diskfulltest", "--nohttpinterface", "--bind_ip", "127.0.0.1" );
+ m = startMongoProgram( "mongod", "--port", port, "--dbpath", dbpath, "--nohttpinterface", "--bind_ip", "127.0.0.1" );
c = m.getDB( "diskfulltest" ).getCollection( "diskfulltest" )
c.save( { a: 6 } );
assert.soon( function() { return rawMongoProgramOutput().match( /file allocation failure/ ); }, "didn't see 'file allocation failure'" );
diff --git a/jstests/disk/killall.js b/jstests/disk/killall.js
new file mode 100644
index 0000000..a1487bb
--- /dev/null
+++ b/jstests/disk/killall.js
@@ -0,0 +1,42 @@
+// running ops should be killed
+// dropped collection should be ok after restart
+
+if ( typeof _threadInject == "undefined" ) { // don't run in v8 mode - SERVER-2076
+
+port = allocatePorts( 1 )[ 0 ]
+
+var baseName = "jstests_disk_killall";
+
+var m = startMongod( "--port", port, "--dbpath", "/data/db/" + baseName, "--nohttpinterface" );
+
+m.getDB( "test" ).getCollection( baseName ).save( {} );
+m.getDB( "test" ).getLastError();
+
+s1 = startParallelShell( "db." + baseName + ".count( { $where: function() { while( 1 ) { ; } } } )", port );
+sleep( 1000 );
+
+s2 = startParallelShell( "db." + baseName + ".drop()", port );
+sleep( 1000 );
+
+/**
+ * 12 == mongod's exit code on interrupt (eg standard kill)
+ * stopMongod sends a standard kill signal to mongod, then waits for mongod to stop. If mongod doesn't stop
+ * in a reasonable amount of time, stopMongod sends kill -9 and in that case will not return 12. We're checking
+ * in this assert that mongod will stop quickly even while evaling an infinite loop in server side js.
+ *
+ * 14 is sometimes returned instead due to SERVER-2184
+ */
+exitCode = stopMongod( port );
+assert( exitCode == 12 || exitCode == 14, "got unexpected exitCode: " + exitCode );
+
+s1();
+s2();
+
+var m = startMongoProgram( "mongod", "--port", port, "--dbpath", "/data/db/" + baseName );
+
+m.getDB( "test" ).getCollection( baseName ).stats();
+m.getDB( "test" ).getCollection( baseName ).drop();
+
+stopMongod( port );
+
+} \ No newline at end of file
diff --git a/jstests/disk/preallocate.js b/jstests/disk/preallocate.js
index d772fbb..4f35866 100644
--- a/jstests/disk/preallocate.js
+++ b/jstests/disk/preallocate.js
@@ -2,7 +2,7 @@
port = allocatePorts( 1 )[ 0 ];
-var baseName = "jstests_preallocate2";
+var baseName = "jstests_preallocate";
var m = startMongod( "--port", port, "--dbpath", "/data/db/" + baseName );
@@ -10,7 +10,11 @@ assert.eq( 0, m.getDBs().totalSize );
m.getDB( baseName ).createCollection( baseName + "1" );
-assert.soon( function() { return m.getDBs().totalSize > 100000000; }, "expected second file to bring total size over 100MB" );
+expectedMB = 100;
+if ( m.getDB( baseName ).serverBits() < 64 )
+ expectedMB /= 4;
+
+assert.soon( function() { return m.getDBs().totalSize > expectedMB * 1000000; }, "\n\n\nFAIL preallocate.js expected second file to bring total size over " + expectedMB + "MB" );
stopMongod( port );
diff --git a/jstests/disk/preallocate2.js b/jstests/disk/preallocate2.js
index ee9382c..9b2159f 100644
--- a/jstests/disk/preallocate2.js
+++ b/jstests/disk/preallocate2.js
@@ -8,4 +8,8 @@ var m = startMongod( "--port", port, "--dbpath", "/data/db/" + baseName );
m.getDB( baseName )[ baseName ].save( {i:1} );
-assert.soon( function() { return m.getDBs().totalSize > 100000000; }, "expected second file to bring total size over 100MB" ); \ No newline at end of file
+expectedMB = 100;
+if ( m.getDB( baseName ).serverBits() < 64 )
+ expectedMB /= 4;
+
+assert.soon( function() { return m.getDBs().totalSize > expectedMB * 1000000; }, "\n\n\nFAIL preallocate.js expected second file to bring total size over " + expectedMB + "MB" );
diff --git a/jstests/disk/preallocate_directoryperdb.js b/jstests/disk/preallocate_directoryperdb.js
new file mode 100644
index 0000000..fd92aaf
--- /dev/null
+++ b/jstests/disk/preallocate_directoryperdb.js
@@ -0,0 +1,50 @@
+/**
+ * Test for SERVER-2417 - should not preallocate a database file while we are
+ * dropping its directory in directoryperdb mode.
+ */
+
+var baseDir = "jstests_disk_preallocate_directoryperdb";
+var baseName = "preallocate_directoryperdb"
+var baseName2 = "preallocate_directoryperdb2"
+var baseName3 = "preallocate_directoryperdb3"
+port = allocatePorts( 1 )[ 0 ];
+dbpath = "/data/db/" + baseDir + "/";
+
+function checkDb2DirAbsent() {
+ files = listFiles( dbpath );
+// printjson( files );
+ for( var f in files ) {
+ var name = files[ f ].name;
+ assert.eq( -1, name.indexOf( dbpath + baseName2 ), "baseName2 dir still present" );
+ }
+}
+
+var m = startMongod( "--smallfiles", "--directoryperdb", "--port", port, "--dbpath", dbpath, "--nohttpinterface", "--bind_ip", "127.0.0.1" );
+db = m.getDB( baseName );
+db2 = m.getDB( baseName2 );
+c = db[ baseName ];
+c2 = db2[ baseName2 ];
+big = new Array( 5000 ).toString();
+for( var i = 0; i < 3000; ++i ) {
+ c.save( { b:big } );
+ c2.save( { b:big } );
+ db.getLastError();
+}
+
+// Due to our write pattern, we expect db2's .3 file to be queued up in the file
+// allocator behind db's .3 file at the time db2 is dropped. This will
+// (incorrectly) cause db2's dir to be recreated until SERVER-2417 is fixed.
+db2.dropDatabase();
+
+checkDb2DirAbsent();
+
+db.dropDatabase();
+
+// Try writing a new database, to ensure file allocator is still working.
+db3 = m.getDB( baseName3 );
+c3 = db[ baseName3 ];
+c3.save( {} );
+assert( !db3.getLastError() );
+assert.eq( 1, c3.count() );
+
+checkDb2DirAbsent();
diff --git a/jstests/distinct1.js b/jstests/distinct1.js
index 433e051..5e47400 100644
--- a/jstests/distinct1.js
+++ b/jstests/distinct1.js
@@ -2,6 +2,8 @@
t = db.distinct1;
t.drop();
+assert.eq( 0 , t.distinct( "a" ).length , "test empty" );
+
t.save( { a : 1 } )
t.save( { a : 2 } )
t.save( { a : 2 } )
diff --git a/jstests/distinct_array1.js b/jstests/distinct_array1.js
index 0d41b80..f654dba 100644
--- a/jstests/distinct_array1.js
+++ b/jstests/distinct_array1.js
@@ -21,4 +21,5 @@ t.save( { a : [] , c : 12 } );
t.save( { a : { b : "z"} , c : 12 } );
res = t.distinct( "a.b" );
+res.sort()
assert.eq( "a,b,c,d,e,f,z" , res.toString() , "B1" );
diff --git a/jstests/distinct_index1.js b/jstests/distinct_index1.js
new file mode 100644
index 0000000..8677457
--- /dev/null
+++ b/jstests/distinct_index1.js
@@ -0,0 +1,50 @@
+
+t = db.distinct_index1
+t.drop();
+
+function r( x ){
+ return Math.floor( Math.sqrt( x * 123123 ) ) % 10;
+}
+
+function d( k , q ){
+ return t.runCommand( "distinct" , { key : k , query : q || {} } )
+}
+
+for ( i=0; i<1000; i++ ){
+ o = { a : r(i*5) , b : r(i) };
+ t.insert( o );
+}
+
+x = d( "a" );
+assert.eq( 1000 , x.stats.n , "AA1" )
+assert.eq( 1000 , x.stats.nscanned , "AA2" )
+assert.eq( 1000 , x.stats.nscannedObjects , "AA3" )
+
+x = d( "a" , { a : { $gt : 5 } } );
+assert.eq( 398 , x.stats.n , "AB1" )
+assert.eq( 1000 , x.stats.nscanned , "AB2" )
+assert.eq( 1000 , x.stats.nscannedObjects , "AB3" )
+
+x = d( "b" , { a : { $gt : 5 } } );
+assert.eq( 398 , x.stats.n , "AC1" )
+assert.eq( 1000 , x.stats.nscanned , "AC2" )
+assert.eq( 1000 , x.stats.nscannedObjects , "AC3" )
+
+
+
+t.ensureIndex( { a : 1 } )
+
+x = d( "a" );
+assert.eq( 1000 , x.stats.n , "BA1" )
+assert.eq( 1000 , x.stats.nscanned , "BA2" )
+assert.eq( 0 , x.stats.nscannedObjects , "BA3" )
+
+x = d( "a" , { a : { $gt : 5 } } );
+assert.eq( 398 , x.stats.n , "BB1" )
+assert.eq( 398 , x.stats.nscanned , "BB2" )
+assert.eq( 0 , x.stats.nscannedObjects , "BB3" )
+
+x = d( "b" , { a : { $gt : 5 } } );
+assert.eq( 398 , x.stats.n , "BC1" )
+assert.eq( 398 , x.stats.nscanned , "BC2" )
+assert.eq( 398 , x.stats.nscannedObjects , "BC3" )
diff --git a/jstests/distinct_index2.js b/jstests/distinct_index2.js
new file mode 100644
index 0000000..2ba65f9
--- /dev/null
+++ b/jstests/distinct_index2.js
@@ -0,0 +1,35 @@
+t = db.distinct_index2;
+t.drop();
+
+t.ensureIndex( { a : 1 , b : 1 } )
+t.ensureIndex( { c : 1 } )
+
+function x(){
+ return Math.floor( Math.random() * 10 );
+}
+
+for ( i=0; i<2000; i++ ){
+ t.insert( { a : x() , b : x() , c : x() } )
+}
+
+correct = []
+for ( i=0; i<10; i++ )
+ correct.push( i )
+
+function check( field ){
+ res = t.distinct( field )
+ res = res.sort()
+ assert.eq( correct , res , "check: " + field );
+
+ if ( field != "a" ){
+ res = t.distinct( field , { a : 1 } )
+ res = res.sort()
+ assert.eq( correct , res , "check 2: " + field );
+ }
+}
+
+check( "a" )
+check( "b" )
+check( "c" )
+
+
diff --git a/jstests/drop2.js b/jstests/drop2.js
new file mode 100644
index 0000000..fa239fd
--- /dev/null
+++ b/jstests/drop2.js
@@ -0,0 +1,43 @@
+t = db.jstests_drop2;
+t.drop();
+
+function debug( x ) {
+// printjson( x );
+}
+
+t.save( {} );
+db.getLastError();
+
+function op( drop ) {
+ p = db.currentOp().inprog;
+ debug( p );
+ for ( var i in p ) {
+ var o = p[ i ];
+ if ( drop ) {
+ if ( o.active && o.query && o.query.drop && o.query.drop == "jstests_drop2" ) {
+ return o.opid;
+ }
+ } else {
+ if ( o.active && o.query && o.query.query && o.query.query.$where && o.ns == "test.jstests_drop2" ) {
+ return o.opid;
+ }
+ }
+ }
+ return null;
+}
+
+s1 = startParallelShell( "db.jstests_drop2.count( { $where: function() { while( 1 ) { ; } } } )" );
+countOp = null;
+assert.soon( function() { countOp = op( false ); return countOp; } );
+
+s2 = startParallelShell( "db.jstests_drop2.drop()" );
+dropOp = null;
+assert.soon( function() { dropOp = op( true ); return dropOp; } );
+
+db.killOp( dropOp );
+db.killOp( countOp );
+
+s1();
+s2();
+
+t.drop(); // in SERVER-1818, this fails
diff --git a/jstests/dropIndex.js b/jstests/drop_index.js
index a6e5f46..a6e5f46 100644
--- a/jstests/dropIndex.js
+++ b/jstests/drop_index.js
diff --git a/jstests/dur/a_quick.js b/jstests/dur/a_quick.js
new file mode 100755
index 0000000..f703f3f
--- /dev/null
+++ b/jstests/dur/a_quick.js
@@ -0,0 +1,123 @@
+/* quick.js
+ test durability
+ this file should always run quickly
+ other tests can be slow
+*/
+
+testname = "a_quick";
+load("jstests/_tst.js");
+
+function checkNoJournalFiles(path, pass) {
+ var files = listFiles(path);
+ if (files.some(function (f) { return f.name.indexOf("prealloc") < 0; })) {
+ if (pass == null) {
+ // wait a bit longer for mongod to potentially finish if it is still running.
+ sleep(10000);
+ return checkNoJournalFiles(path, 1);
+ }
+ print("\n\n\n");
+ print("FAIL path:" + path);
+ print("unexpected files:");
+ printjson(files);
+ assert(false, "FAIL a journal/lsn file is present which is unexpected");
+ }
+}
+
+// directories
+var path1 = "/data/db/quicknodur";
+var path2 = "/data/db/quickdur";
+
+// non-durable version
+tst.log("start mongod without dur");
+var conn = startMongodEmpty("--port", 30000, "--dbpath", path1, "--nodur");
+tst.log("without dur work");
+var d = conn.getDB("test");
+d.foo.insert({ _id:123 });
+d.getLastError();
+tst.log("stop without dur");
+stopMongod(30000);
+
+// durable version
+tst.log("start mongod with dur");
+conn = startMongodEmpty("--port", 30001, "--dbpath", path2, "--dur", "--durOptions", 8);
+tst.log("with dur work");
+d = conn.getDB("test");
+d.foo.insert({ _id: 123 });
+d.getLastError(); // wait
+
+// we could actually do getlasterror fsync:1 now, but maybe this is as good
+// as it will assure that commits happen on a timely basis. a bunch of the other dur/*.js
+// tests use fsync
+tst.log("sleep a bit for a group commit");
+sleep(8000);
+
+// kill the process hard
+tst.log("kill -9 mongod");
+stopMongod(30001, /*signal*/9);
+
+// journal file should be present, and non-empty as we killed hard
+
+// we will force removal of a datafile to be sure we can recreate everything
+// without it being present.
+removeFile(path2 + "/test.0");
+
+// for that to work, we can't skip anything though:
+removeFile(path2 + "/journal/lsn");
+
+// with the file deleted, we MUST start from the beginning of the journal.
+// thus this check to be careful
+var files = listFiles(path2 + "/journal/");
+if (files.some(function (f) { return f.name.indexOf("lsn") >= 0; })) {
+ print("\n\n\n");
+ print(path2);
+ printjson(files);
+ assert(false, "a journal/lsn file is present which will make this test potentially fail.");
+}
+
+// restart and recover
+tst.log("restart and recover");
+conn = startMongodNoReset("--port", 30002, "--dbpath", path2, "--dur", "--durOptions", 9);
+tst.log("check data results");
+d = conn.getDB("test");
+
+var countOk = (d.foo.count() == 1);
+if (!countOk) {
+ print("\n\n\na_quick.js FAIL count " + d.foo.count() + " is wrong\n\n\n");
+ // keep going - want to see if the diff matches. if so the sleep() above was too short?
+}
+
+tst.log("stop");
+stopMongod(30002);
+
+// at this point, after clean shutdown, there should be no journal files
+tst.log("check no journal files");
+checkNoJournalFiles(path2 + "/journal");
+
+tst.log("check data matches");
+var diff = tst.diff(path1 + "/test.ns", path2 + "/test.ns");
+print("diff of .ns files returns:" + diff);
+
+function showfiles() {
+ print("\n\nERROR: files for dur and nodur do not match");
+ print(path1 + " files:");
+ printjson(listFiles(path1));
+ print(path2 + " files:");
+ printjson(listFiles(path2));
+ print();
+}
+
+if (diff != "") {
+ showfiles();
+ assert(diff == "", "error test.ns files differ");
+}
+
+diff = tst.diff(path1 + "/test.0", path2 + "/test.0");
+print("diff of .0 files returns:" + diff);
+if (diff != "") {
+ showfiles();
+ assert(diff == "", "error test.0 files differ");
+}
+
+assert(countOk, "a_quick.js document count after recovery was not the expected value");
+
+tst.success();
diff --git a/jstests/dur/closeall.js b/jstests/dur/closeall.js
new file mode 100644
index 0000000..f169f06
--- /dev/null
+++ b/jstests/dur/closeall.js
@@ -0,0 +1,80 @@
+// testing closealldatabases concurrency
+// this is also a test of recoverFromYield() as that will get exercised by the update
+
+function f() {
+ var variant = (new Date()) % 4;
+ var path = "/data/db/closeall";
+ var path2 = "/data/db/closeall_slave";
+ var ourdb = "closealltest";
+
+ print("closeall.js start mongod variant:" + variant);
+ var options = (new Date()-0)%2==0 ? 8 : 0;
+ print("closeall.js --durOptions " + options);
+ var N = 1000;
+ if (options)
+ N = 300;
+
+ // use replication to exercise that code too with a close, and also to test local.sources with a close
+ var conn = startMongodEmpty("--port", 30001, "--dbpath", path, "--dur", "--durOptions", options, "--master", "--oplogSize", 64);
+ var connSlave = startMongodEmpty("--port", 30002, "--dbpath", path2, "--dur", "--durOptions", options, "--slave", "--source", "localhost:30001");
+
+ var slave = connSlave.getDB(ourdb);
+
+ // we'll use two connections to make a little parallelism
+ var db1 = conn.getDB(ourdb);
+ var db2 = new Mongo(db1.getMongo().host).getDB(ourdb);
+
+ print("closeall.js run test");
+
+ for( var i = 0; i < N; i++ ) {
+ db1.foo.insert({x:1}); // this does wait for a return code so we will get some parallelism
+ if( i % 7 == 0 )
+ db1.foo.insert({x:99, y:2});
+ if( i % 49 == 0 )
+ db1.foo.update({ x: 99 }, { a: 1, b: 2, c: 3, d: 4 });
+ if (i % 100 == 0)
+ db1.foo.find();
+ if( i == 800 )
+ db1.foo.ensureIndex({ x: 1 });
+ var res = null;
+ try {
+ if( variant == 1 )
+ sleep(0);
+ else if( variant == 2 )
+ sleep(1);
+ else if( variant == 3 && i % 10 == 0 )
+ print(i);
+ res = db2.adminCommand("closeAllDatabases");
+ }
+ catch (e) {
+ sleep(5000); // sleeping a little makes console output order prettier
+ print("\n\n\nFAIL closeall.js closeAllDatabases command invocation threw an exception. i:" + i);
+ try {
+ print("getlasterror:");
+ printjson(db2.getLastErrorObj());
+ print("trying one more closealldatabases:");
+ res = db2.adminCommand("closeAllDatabases");
+ printjson(res);
+ }
+ catch (e) {
+ print("got another exception : " + e);
+ }
+ print("\n\n\n");
+ // sleep a little to capture possible mongod output?
+ sleep(2000);
+ throw e;
+ }
+ assert( res.ok, "closeAllDatabases res.ok=false");
+ }
+
+ print("closeall.js end test loop. slave.foo.count:");
+ print(slave.foo.count());
+
+ print("closeall.js shutting down servers");
+ stopMongod(30002);
+ stopMongod(30001);
+}
+
+f();
+sleep(500);
+print("SUCCESS closeall.js");
diff --git a/jstests/dur/diskfull.js b/jstests/dur/diskfull.js
new file mode 100644
index 0000000..da45c20
--- /dev/null
+++ b/jstests/dur/diskfull.js
@@ -0,0 +1,136 @@
+/** Test running out of disk space with durability enabled */
+
+startPath = "/data/db/diskfulltest";
+recoverPath = "/data/db/dur_diskfull";
+
+doIt = false;
+files = listFiles( "/data/db" );
+for ( i in files ) {
+ if ( files[ i ].name == startPath ) {
+ doIt = true;
+ }
+}
+
+if ( !doIt ) {
+ print( "path " + startPath + " missing, skipping diskfull test" );
+ doIt = false;
+}
+
+function checkNoJournalFiles(path, pass) {
+ var files = listFiles(path);
+ if (files.some(function (f) { return f.name.indexOf("prealloc") < 0; })) {
+ if (pass == null) {
+ // wait a bit longer for mongod to potentially finish if it is still running.
+ sleep(10000);
+ return checkNoJournalFiles(path, 1);
+ }
+ print("\n\n\n");
+ print("FAIL path:" + path);
+ print("unexpected files:");
+ printjson(files);
+ assert(false, "FAIL a journal/lsn file is present which is unexpected");
+ }
+}
+
+/** Clear dbpath without removing and recreating diskfulltest directory, as resetDbpath does */
+function clear() {
+ files = listFiles( startPath );
+ files.forEach( function( x ) { removeFile( x.name ) } );
+}
+
+function log(str) {
+ print();
+ if(str)
+ print(testname+" step " + step++ + " " + str);
+ else
+ print(testname+" step " + step++);
+}
+
+function work() {
+ log("work");
+ try {
+ var d = conn.getDB("test");
+
+ big = new Array( 5000 ).toString();
+ for( i = 0; i < 10000; ++i ) {
+ d.foo.insert( { _id:i, b:big } );
+ }
+
+ d.getLastError();
+ } catch ( e ) {
+ print( e );
+ raise( e );
+ } finally {
+ log("endwork");
+ }
+}
+
+function verify() {
+ log("verify");
+ var d = conn.getDB("test");
+ c = d.foo.count();
+ v = d.foo.validate();
+ // not much we can guarantee about the writes, just validate when possible
+ if ( c != 0 && !v.valid ) {
+ printjson( v );
+ print( c );
+ assert( v.valid );
+ assert.gt( c, 0 );
+ }
+}
+
+function runFirstMongodAndFillDisk() {
+ log();
+
+ clear();
+ conn = startMongodNoReset("--port", 30001, "--dbpath", startPath, "--dur", "--smallfiles", "--durOptions", 8, "--noprealloc");
+
+ assert.throws( work, null, "no exception thrown when exceeding disk capacity" );
+ waitMongoProgramOnPort( 30001 );
+
+ // the above wait doesn't work on windows
+ sleep(5000);
+}
+
+function runSecondMongdAndRecover() {
+ // restart and recover
+ log();
+ conn = startMongodNoReset("--port", 30003, "--dbpath", startPath, "--dur", "--smallfiles", "--durOptions", 8, "--noprealloc");
+ verify();
+
+ log("stop");
+ stopMongod(30003);
+
+ // stopMongod seems to be asynchronous (hmmm) so we sleep here.
+ sleep(5000);
+
+ // at this point, after clean shutdown, there should be no journal files
+ log("check no journal files");
+ checkNoJournalFiles(startPath + "/journal/");
+
+ log();
+}
+
+function someWritesInJournal() {
+ runFirstMongodAndFillDisk();
+ runSecondMongdAndRecover();
+}
+
+function noWritesInJournal() {
+ // It is too difficult to consistently trigger cases where there are no existing journal files due to lack of disk space, but
+    // if we were to test this case we would need to manually remove the lock file.
+// removeFile( startPath + "/mongod.lock" );
+}
+
+if ( doIt ) {
+
+ var testname = "dur_diskfull";
+ var step = 1;
+ var conn = null;
+
+ someWritesInJournal();
+ noWritesInJournal();
+
+ print(testname + " SUCCESS");
+
+} \ No newline at end of file
diff --git a/jstests/dur/dropdb.js b/jstests/dur/dropdb.js
new file mode 100644
index 0000000..7f82cd7
--- /dev/null
+++ b/jstests/dur/dropdb.js
@@ -0,0 +1,163 @@
+/* durability test dropping a database
+*/
+
+var debugging = false;
+var testname = "dropdb";
+var step = 1;
+var conn = null;
+
+function checkNoJournalFiles(path, pass) {
+ var files = listFiles(path);
+ if (files.some(function (f) { return f.name.indexOf("prealloc") < 0; })) {
+ if (pass == null) {
+ // wait a bit longer for mongod to potentially finish if it is still running.
+ sleep(10000);
+ return checkNoJournalFiles(path, 1);
+ }
+ print("\n\n\n");
+ print("FAIL path:" + path);
+ print("unexpected files:");
+ printjson(files);
+ assert(false, "FAIL a journal/lsn file is present which is unexpected");
+ }
+}
+
+function runDiff(a, b) {
+ function reSlash(s) {
+ var x = s;
+ if (_isWindows()) {
+ while (1) {
+ var y = x.replace('/', '\\');
+ if (y == x)
+ break;
+ x = y;
+ }
+ }
+ return x;
+ }
+ a = reSlash(a);
+ b = reSlash(b);
+ print("diff " + a + " " + b);
+ return run("diff", a, b);
+}
+
+function log(str) {
+ if (str)
+ print("\n" + testname + " step " + step++ + " " + str);
+ else
+ print("\n" + testname + " step " + step++);
+}
+
+// if you do inserts here, you will want to set _id. otherwise they won't match on different
+// runs so we can't do a binary diff of the resulting files to check they are consistent.
+function work() {
+ log("work (add data, drop database)");
+
+ var e = conn.getDB("teste");
+ e.foo.insert({ _id: 99 });
+
+ var d = conn.getDB("test");
+ d.foo.insert({ _id: 3, x: 22 });
+ d.bar.insert({ _id: 3, x: 22 });
+
+ d.dropDatabase();
+
+ d.foo.insert({ _id: 100 });
+
+ // assure writes applied in case we kill -9 on return from this function
+ assert(d.runCommand({ getlasterror: 1, fsync: 1 }).ok, "getlasterror not ok");
+}
+
+function verify() {
+ log("verify");
+ var d = conn.getDB("test");
+ var count = d.foo.count();
+ if (count != 1) {
+ print("going to fail, count mismatch in verify()");
+ sleep(10000); // easier to read the output this way
+ print("\n\n\ndropdb.js FAIL test.foo.count() should be 1 but is : " + count);
+ print(d.foo.count() + "\n\n\n");
+ assert(false);
+ }
+ assert(d.foo.findOne()._id == 100, "100");
+
+ print("dropdb.js teste.foo.findOne:");
+ printjson(conn.getDB("teste").foo.findOne());
+
+ var teste = conn.getDB("teste");
+ print("dropdb count " + teste.foo.count());
+ assert(teste.foo.findOne()._id == 99, "teste");
+
+}
+
+if (debugging) {
+ // mongod already running in debugger
+ conn = db.getMongo();
+ work();
+ verify();
+ sleep(30000);
+ quit();
+}
+
+// directories
+var path1 = "/data/db/" + testname + "nodur";
+var path2 = "/data/db/" + testname + "dur";
+
+// non-durable version
+log("mongod nodur");
+conn = startMongodEmpty("--port", 30000, "--dbpath", path1, "--nodur", "--smallfiles");
+work();
+verify();
+stopMongod(30000);
+
+// durable version
+log("mongod dur");
+conn = startMongodEmpty("--port", 30001, "--dbpath", path2, "--dur", "--smallfiles", "--durOptions", 8);
+work();
+verify();
+
+// kill the process hard
+log("kill 9");
+stopMongod(30001, /*signal*/9);
+
+// journal file should be present, and non-empty as we killed hard
+
+// we will force removal of a datafile to be sure we can recreate everything.
+removeFile(path2 + "/test.0");
+// the trick above is only valid if journals haven't rotated out, and also if lsn isn't skipping
+removeFile(path2 + "/lsn");
+
+log("restart and recover");
+conn = startMongodNoReset("--port", 30002, "--dbpath", path2, "--dur", "--smallfiles", "--durOptions", 9);
+
+log("verify after recovery");
+verify();
+
+log("stop mongod 30002");
+stopMongod(30002);
+sleep(5000);
+
+// at this point, after clean shutdown, there should be no journal files
+log("check no journal files");
+checkNoJournalFiles(path2 + "/journal");
+
+log("check data matches ns");
+var diff = runDiff(path1 + "/test.ns", path2 + "/test.ns");
+if (diff != "") {
+ print("\n\n\nDIFFERS\n");
+ print(diff);
+}
+assert(diff == "", "error test.ns files differ");
+
+log("check data matches .0");
+diff = runDiff(path1 + "/test.0", path2 + "/test.0");
+if (diff != "") {
+ print("\n\n\nDIFFERS\n");
+ print(diff);
+}
+assert(diff == "", "error test.0 files differ");
+
+log("check data matches done");
+
+print(testname + " SUCCESS");
+
diff --git a/jstests/dur/dur1.js b/jstests/dur/dur1.js
new file mode 100755
index 0000000..4c8f1bf
--- /dev/null
+++ b/jstests/dur/dur1.js
@@ -0,0 +1,154 @@
+/*
+ test durability
+*/
+
+var debugging = false;
+var testname = "dur1";
+var step = 1;
+var conn = null;
+
+function checkNoJournalFiles(path, pass) {
+ var files = listFiles(path);
+ if (files.some(function (f) { return f.name.indexOf("prealloc") < 0; })) {
+ if (pass == null) {
+ // wait a bit longer for mongod to potentially finish if it is still running.
+ sleep(10000);
+ return checkNoJournalFiles(path, 1);
+ }
+ print("\n\n\n");
+ print("FAIL path:" + path);
+ print("unexpected files:");
+ printjson(files);
+ assert(false, "FAIL a journal/lsn file is present which is unexpected");
+ }
+}
+
+function runDiff(a, b) {
+ function reSlash(s) {
+ var x = s;
+ if (_isWindows()) {
+ while (1) {
+ var y = x.replace('/', '\\');
+ if (y == x)
+ break;
+ x = y;
+ }
+ }
+ return x;
+ }
+ a = reSlash(a);
+ b = reSlash(b);
+ print("diff " + a + " " + b);
+ return run("diff", a, b);
+}
+
+function log(str) {
+ print();
+ if(str)
+ print(testname+" step " + step++ + " " + str);
+ else
+ print(testname+" step " + step++);
+}
+
+// if you do inserts here, you will want to set _id. otherwise they won't match on different
+// runs so we can't do a binary diff of the resulting files to check they are consistent.
+function work() {
+ log("work");
+ var d = conn.getDB("test");
+ d.foo.insert({ _id: 3, x: 22 });
+ d.foo.insert({ _id: 4, x: 22 });
+ d.a.insert({ _id: 3, x: 22, y: [1, 2, 3] });
+ d.a.insert({ _id: 4, x: 22, y: [1, 2, 3] });
+ d.a.update({ _id: 4 }, { $inc: { x: 1} });
+
+ // try building an index. however, be careful as object id's in system.indexes would vary, so we do it manually:
+ d.system.indexes.insert({ _id: 99, ns: "test.a", key: { x: 1 }, name: "x_1", v: 0 });
+
+// d.a.update({ _id: 4 }, { $inc: { x: 1} });
+// d.a.reIndex();
+
+ // assure writes applied in case we kill -9 on return from this function
+ d.getLastError();
+
+ log("endwork");
+ return d;
+}
+
+function verify() {
+ log("verify");
+ var d = conn.getDB("test");
+ var ct = d.foo.count();
+ if (ct != 2) {
+ print("\n\n\nFAIL dur1.js count is wrong in verify(): " + ct + "\n\n\n");
+ assert(ct == 2);
+ }
+}
+
+if( debugging ) {
+ // mongod already running in debugger
+ conn = db.getMongo();
+ work();
+ sleep(30000);
+ quit();
+}
+
+log();
+
+// directories
+var path1 = "/data/db/" + testname+"nodur";
+var path2 = "/data/db/" + testname+"dur";
+
+// non-durable version
+log();
+conn = startMongodEmpty("--port", 30000, "--dbpath", path1, "--nodur", "--smallfiles");
+work();
+stopMongod(30000);
+
+// durable version
+log();
+conn = startMongodEmpty("--port", 30001, "--dbpath", path2, "--dur", "--smallfiles", "--durOptions", 8);
+work();
+
+// wait for group commit.
+printjson(conn.getDB('admin').runCommand({getlasterror:1, fsync:1}));
+
+// kill the process hard
+stopMongod(30001, /*signal*/9);
+
+// journal file should be present, and non-empty as we killed hard
+
+// restart and recover
+log();
+conn = startMongodNoReset("--port", 30002, "--dbpath", path2, "--dur", "--smallfiles", "--durOptions", 8);
+verify();
+
+log("stop");
+stopMongod(30002);
+
+// stopMongod seems to be asynchronous (hmmm) so we sleep here.
+sleep(5000);
+
+// at this point, after clean shutdown, there should be no journal files
+log("check no journal files");
+checkNoJournalFiles(path2 + "/journal");
+
+log("check data matches ns");
+var diff = runDiff(path1 + "/test.ns", path2 + "/test.ns");
+if (diff != "") {
+ print("\n\n\nDIFFERS\n");
+ print(diff);
+}
+assert(diff == "", "error test.ns files differ");
+
+log("check data matches .0");
+var diff = runDiff(path1 + "/test.0", path2 + "/test.0");
+if (diff != "") {
+ print("\n\n\nDIFFERS\n");
+ print(diff);
+}
+assert(diff == "", "error test.0 files differ");
+
+log("check data matches done");
+
+print(testname + " SUCCESS");
+
diff --git a/jstests/dur/dur2.js b/jstests/dur/dur2.js
new file mode 100644
index 0000000..dd0ab0f
--- /dev/null
+++ b/jstests/dur/dur2.js
@@ -0,0 +1,92 @@
+/* test durability
+ runs mongod, kill -9's, recovers
+*/
+
+var debugging = false;
+var testname = "dur2";
+var step = 1;
+var conn = null;
+
+var start = new Date();
+function howLongSecs() {
+ return (new Date() - start) / 1000;
+}
+
+function log(str) {
+ if(str)
+ print("\n" + testname+" step " + step++ + " " + str);
+ else
+ print(testname+" step " + step++);
+}
+
+function verify() {
+ log("verify");
+ var d = conn.getDB("test");
+ var mycount = d.foo.count();
+ //print("count:" + mycount);
+ assert(mycount>2, "count wrong");
+}
+
+function work() {
+ log("work");
+ x = 'x'; while(x.length < 1024) x+=x;
+ var d = conn.getDB("test");
+ d.foo.drop();
+ d.foo.insert({});
+
+ // go long enough we will have time to kill it later during recovery
+ var j = 2;
+ var MaxTime = 15;
+ if (Math.random() < 0.1) {
+ print("dur2.js DOING A LONGER (120 sec) PASS - if an error, try long pass to replicate");
+ MaxTime = 120;
+ }
+ while (1) {
+ d.foo.insert({ _id: j, z: x });
+ d.foo.update({ _id: j }, { $inc: { a: 1} });
+ if (j % 25 == 0)
+ d.foo.remove({ _id: j });
+ j++;
+ if( j % 3 == 0 )
+ d.foo.update({ _id: j }, { $inc: { a: 1} }, true);
+ if (j % 10000 == 0)
+ print(j);
+ if (howLongSecs() > MaxTime)
+ break;
+ }
+
+ verify();
+ d.runCommand({ getLastError: 1, fsync: 1 });
+}
+
+if( debugging ) {
+ // mongod already running in debugger
+ print("DOING DEBUG MODE BEHAVIOR AS 'db' IS DEFINED -- RUN mongo --nodb FOR REGULAR TEST BEHAVIOR");
+ conn = db.getMongo();
+ work();
+ sleep(30000);
+ quit();
+}
+
+// directories
+var path = "/data/db/" + testname+"dur";
+
+log("run mongod with --dur");
+conn = startMongodEmpty("--port", 30001, "--dbpath", path, "--dur", "--smallfiles", "--durOptions", /*DurParanoid*/8, "--master", "--oplogSize", 64);
+work();
+
+log("kill -9");
+stopMongod(30001, /*signal*/9);
+
+// journal file should be present, and non-empty as we killed hard
+assert(listFiles(path + "/journal/").length > 0, "journal directory is unexpectantly empty after kill");
+
+// restart and recover
+log("restart mongod and recover");
+conn = startMongodNoReset("--port", 30002, "--dbpath", path, "--dur", "--smallfiles", "--durOptions", 8, "--master", "--oplogSize", 64);
+verify();
+
+log("stopping mongod 30002");
+stopMongod(30002);
+
+print(testname + " SUCCESS");
diff --git a/jstests/dur/lsn.js b/jstests/dur/lsn.js
new file mode 100755
index 0000000..505d8f5
--- /dev/null
+++ b/jstests/dur/lsn.js
@@ -0,0 +1,126 @@
+/* test durability, specifically last sequence number function
+ runs mongod, kill -9's, recovers
+ then writes more data and verifies with DurParanoid that it matches
+*/
+
+var debugging = false;
+var testname = "lsn";
+var step = 1;
+var conn = null;
+
+var start = new Date();
+function howLongSecs() {
+ return (new Date() - start) / 1000;
+}
+
+function log(str) {
+ if(str)
+ print("\n" + testname+" step " + step++ + " " + str);
+ else
+ print(testname+" step " + step++);
+}
+
+function verify() {
+ log("verify");
+ var d = conn.getDB("test");
+ var mycount = d.foo.count();
+ print("count:" + mycount);
+ assert(mycount>2, "count wrong");
+}
+
+// if you do inserts here, you will want to set _id. otherwise they won't match on different
+// runs so we can't do a binary diff of the resulting files to check they are consistent.
+function work() {
+ log("work");
+ x = 'x'; while(x.length < 1024) x+=x;
+ var d = conn.getDB("test");
+ d.foo.drop();
+ d.foo.insert({});
+
+ // go long enough we will have time to kill it later during recovery
+ var j = 2;
+ var MaxTime = 15;
+ if (Math.random() < 0.05) {
+ print("doing a longer pass");
+ MaxTime = 90;
+ }
+ while (1) {
+ d.foo.insert({ _id: j, z: x });
+ d.foo.update({ _id: j }, { $inc: { a: 1} });
+ if (j % 25 == 0)
+ d.foo.remove({ _id: j });
+ j++;
+ if( j % 3 == 0 )
+ d.foo.update({ _id: j }, { $inc: { a: 1} }, true);
+ if (j % 10000 == 0)
+ print(j);
+ if (howLongSecs() > MaxTime)
+ break;
+ }
+
+ verify();
+ d.runCommand({ getLastError: 1, fsync: 1 });
+}
+
+if( debugging ) {
+ // mongod already running in debugger
+ print("DOING DEBUG MODE BEHAVIOR AS 'db' IS DEFINED -- RUN mongo --nodb FOR REGULAR TEST BEHAVIOR");
+ conn = db.getMongo();
+ work();
+ sleep(30000);
+ quit();
+}
+
+// directories
+var path2 = "/data/db/" + testname+"dur";
+
+// run mongod with a short --syncdelay to make LSN writing sooner
+log("run mongod --dur and a short --syncdelay");
+conn = startMongodEmpty("--syncdelay", 2, "--port", 30001, "--dbpath", path2, "--dur", "--smallfiles", "--durOptions", /*DurParanoid*/8, "--master", "--oplogSize", 64);
+work();
+
+log("wait a while for a sync and an lsn write");
+sleep(14); // wait for lsn write -- NOTE(review): sleep() takes milliseconds, so 14 ms looks too short for a 2s syncdelay; verify (14000 intended?)
+
+log("kill mongod -9");
+stopMongod(30001, /*signal*/9);
+
+// journal file should be present, and non-empty as we killed hard
+
+// check that there is an lsn file
+{
+ var files = listFiles(path2 + "/journal/");
+ assert(files.some(function (f) { return f.name.indexOf("lsn") >= 0; }),
+ "lsn.js FAIL no lsn file found after kill, yet one is expected");
+}
+/*assert.soon(
+ function () {
+ var files = listFiles(path2 + "/journal/");
+ return files.some(function (f) { return f.name.indexOf("lsn") >= 0; });
+ },
+ "lsn.js FAIL no lsn file found after kill, yet one is expected"
+);*/
+
+// restart and recover
+log("restart mongod, recover, verify");
+conn = startMongodNoReset("--port", 30002, "--dbpath", path2, "--dur", "--smallfiles", "--durOptions", 24, "--master", "--oplogSize", 64);
+verify();
+
+// idea here is to verify (in a simplistic way) that we are in a good state to do further ops after recovery
+log("add data after recovery");
+{
+ var d = conn.getDB("test");
+ d.xyz.insert({ x: 1 });
+ d.xyz.insert({ x: 1 });
+ d.xyz.insert({ x: 1 });
+ d.xyz.update({}, { $set: { x: "aaaaaaaaaaaa"} });
+ d.xyz.reIndex();
+ d.xyz.drop();
+ sleep(1);
+ d.xyz.insert({ x: 1 });
+}
+
+log("stop mongod 30002");
+stopMongod(30002);
+
+print(testname + " SUCCESS");
diff --git a/jstests/dur/manyRestart.js b/jstests/dur/manyRestart.js
new file mode 100755
index 0000000..04e4318
--- /dev/null
+++ b/jstests/dur/manyRestart.js
@@ -0,0 +1,191 @@
+/*
+ test durability
+*/
+
+var debugging = false;
+var testname = "manyRestarts";
+var step = 1;
+var conn = null;
+
+function checkNoJournalFiles(path, pass) {
+ var files = listFiles(path);
+ if (files.some(function (f) { return f.name.indexOf("prealloc") < 0; })) {
+ if (pass == null) {
+ // wait a bit longer for mongod to potentially finish if it is still running.
+ sleep(10000);
+ return checkNoJournalFiles(path, 1);
+ }
+ print("\n\n\n");
+ print("FAIL path:" + path);
+ print("unexpected files:");
+ printjson(files);
+ assert(false, "FAIL a journal/lsn file is present which is unexpected");
+ }
+}
+
+function runDiff(a, b) {
+ function reSlash(s) {
+ var x = s;
+ if (_isWindows()) {
+ while (1) {
+ var y = x.replace('/', '\\');
+ if (y == x)
+ break;
+ x = y;
+ }
+ }
+ return x;
+ }
+ a = reSlash(a);
+ b = reSlash(b);
+ print("diff " + a + " " + b);
+ return run("diff", a, b);
+}
+
+function log(str) {
+ print();
+ if(str)
+ print(testname+" step " + step++ + " " + str);
+ else
+ print(testname+" step " + step++);
+}
+
+// if you do inserts here, you will want to set _id. otherwise they won't match on different
+// runs so we can't do a binary diff of the resulting files to check they are consistent.
+function work() {
+ log("work");
+ var d = conn.getDB("test");
+ d.foo.insert({ _id: 3, x: 22 });
+ d.foo.insert({ _id: 4, x: 22 });
+ d.a.insert({ _id: 3, x: 22, y: [1, 2, 3] });
+ d.a.insert({ _id: 4, x: 22, y: [1, 2, 3] });
+ d.a.update({ _id: 4 }, { $inc: { x: 1} });
+
+ // try building an index. however, be careful as object id's in system.indexes would vary, so we do it manually:
+ d.system.indexes.insert({ _id: 99, ns: "test.a", key: { x: 1 }, name: "x_1", v: 0 });
+
+// d.a.update({ _id: 4 }, { $inc: { x: 1} });
+// d.a.reIndex();
+
+ // assure writes applied in case we kill -9 on return from this function
+ d.getLastError();
+ log("endwork");
+ return d;
+}
+
+function addRows() {
+ var rand = Random.randInt(10000);
+ log("add rows " + rand);
+ var d = conn.getDB("test");
+ for (var j = 0; j < rand; ++j) {
+ d.rows.insert({a:1, b: "blah"});
+ }
+ return rand;
+}
+
+function verify() {
+ log("verify");
+ var d = conn.getDB("test");
+ assert.eq(d.foo.count(), 2, "collection count is wrong");
+ assert.eq(d.a.count(), 2, "collection count is wrong");
+}
+
+function verifyRows(nrows) {
+ log("verify rows " + nrows);
+ var d = conn.getDB("test");
+ assert.eq(d.rows.count(), nrows, "collection count is wrong");
+}
+
+if( debugging ) {
+ // mongod already running in debugger
+ conn = db.getMongo();
+ work();
+ sleep(30000);
+ quit();
+}
+
+log();
+
+// directories
+var path1 = "/data/db/" + testname+"nodur";
+var path2 = "/data/db/" + testname+"dur";
+
+// non-durable version
+log("starting 30000");
+conn = startMongodEmpty("--port", 30000, "--dbpath", path1, "--nodur", "--smallfiles");
+work();
+stopMongod(30000);
+
+log("starting 30001");
+conn = startMongodEmpty("--port", 30001, "--dbpath", path2, "--dur", "--smallfiles", "--durOptions", 8);
+work();
+// wait for group commit.
+printjson(conn.getDB('admin').runCommand({getlasterror:1, fsync:1}));
+
+stopMongod(30001);
+sleep(5000);
+
+for (var i = 0; i < 3; ++i) {
+
+ // durable version
+ log("restarting 30001");
+ conn = startMongodNoReset("--port", 30001, "--dbpath", path2, "--dur", "--smallfiles", "--durOptions", 8);
+
+ // wait for group commit.
+ printjson(conn.getDB('admin').runCommand({getlasterror:1, fsync:1}));
+
+ verify();
+
+ // kill the process hard
+ log("hard kill");
+ stopMongod(30001, /*signal*/9);
+
+ sleep(5000);
+}
+
+// journal file should be present, and non-empty as we killed hard
+
+// restart and recover
+log("restart");
+conn = startMongodNoReset("--port", 30002, "--dbpath", path2, "--dur", "--smallfiles", "--durOptions", 8);
+log("verify");
+verify();
+log("stop");
+stopMongod(30002);
+sleep(5000);
+
+// at this point, after clean shutdown, there should be no journal files
+log("check no journal files");
+checkNoJournalFiles(path2 + "/journal");
+
+log("check data matches ns");
+var diff = runDiff(path1 + "/test.ns", path2 + "/test.ns");
+assert(diff == "", "error test.ns files differ");
+
+log("check data matches .0");
+var diff = runDiff(path1 + "/test.0", path2 + "/test.0");
+assert(diff == "", "error test.0 files differ");
+
+log("check data matches done");
+
+var nrows = 0;
+for (var i = 0; i < 5; ++i) {
+
+ // durable version
+ log("restarting 30001");
+ conn = startMongodNoReset("--port", 30001, "--dbpath", path2, "--dur", "--smallfiles", "--durOptions", 8);
+ nrows += addRows();
+ // wait for group commit.
+ printjson(conn.getDB('admin').runCommand({getlasterror:1, fsync:1}));
+
+ verifyRows(nrows);
+
+ // kill the process hard
+ log("hard kill");
+ stopMongod(30001, /*signal*/9);
+
+ sleep(5000);
+}
+
+print(testname + " SUCCESS");
+
diff --git a/jstests/dur/md5.js b/jstests/dur/md5.js
new file mode 100644
index 0000000..107476e
--- /dev/null
+++ b/jstests/dur/md5.js
@@ -0,0 +1,101 @@
+/**
+ * Test md5 validation of journal file.
+ * This test is dependent on the journal file format and may require an update if the format changes,
+ * see comments near fuzzFile() below.
+ */
+
+var debugging = false;
+var testname = "dur_md5";
+var step = 1;
+var conn = null;
+
+function log(str) {
+ print();
+ if(str)
+ print(testname+" step " + step++ + " " + str);
+ else
+ print(testname+" step " + step++);
+}
+
+/** Changes here may require updating the byte index of the md5 hash, see File comments below. */
+function work() {
+ log("work");
+ var d = conn.getDB("test");
+ d.foo.insert({ _id: 3, x: 22 });
+ d.foo.insert({ _id: 4, x: 22 });
+ d.a.insert({ _id: 3, x: 22, y: [1, 2, 3] });
+ d.a.insert({ _id: 4, x: 22, y: [1, 2, 3] });
+ d.a.update({ _id: 4 }, { $inc: { x: 1} });
+
+ // try building an index. however, be careful as object id's in system.indexes would vary, so we do it manually:
+ d.system.indexes.insert({ _id: 99, ns: "test.a", key: { x: 1 }, name: "x_1", v: 0 });
+
+ // d.a.update({ _id: 4 }, { $inc: { x: 1} });
+ // d.a.reIndex();
+
+ // assure writes applied in case we kill -9 on return from this function
+ d.getLastError();
+
+ log("endwork");
+}
+
+if( debugging ) {
+ // mongod already running in debugger
+ conn = db.getMongo();
+ work();
+ sleep(30000);
+ quit();
+}
+
+log();
+
+var path = "/data/db/" + testname+"dur";
+
+log();
+conn = startMongodEmpty("--port", 30001, "--dbpath", path, "--dur", "--smallfiles", "--durOptions", 8);
+work();
+
+// wait for group commit.
+printjson(conn.getDB('admin').runCommand({getlasterror:1, fsync:1}));
+
+log("kill -9");
+
+// kill the process hard
+stopMongod(30001, /*signal*/9);
+
+// journal file should be present, and non-empty as we killed hard
+
+// Bit flip the first byte of the md5sum contained within the opcode footer.
+// This ensures we get an md5 exception instead of some other type of exception.
+var file = path + "/journal/j._0";
+
+// if test fails, uncomment these "cp" lines to debug:
+// run("cp", file, "/tmp/before");
+
+// journal header is 8192
+// jsectheader is 20
+// so a little beyond that
+fuzzFile(file, 8214+8);
+
+// run("cp", file, "/tmp/after");
+
+log("run mongod again recovery should fail");
+
+// 100 exit code corresponds to EXIT_UNCAUGHT, which is triggered when there is an exception during recovery.
+// 14 is sometimes triggered instead due to SERVER-2184
+exitCode = runMongoProgram( "mongod", "--port", 30002, "--dbpath", path, "--dur", "--smallfiles", "--durOptions", /*9*/13 );
+
+if (exitCode != 100 && exitCode != 14) {
+ print("\n\n\nFAIL md5.js expected mongod to fail but didn't? mongod exitCode: " + exitCode + "\n\n\n");
+ // sleep a little longer to get more output maybe
+ sleep(2000);
+ assert(false);
+}
+
+// TODO Possibly we could check the mongod log to verify that the correct type of exception was thrown. But
+// that would introduce a dependency on the mongod log format, which we may not want.
+
+print("SUCCESS md5.js");
+
+// if we sleep a little here we may get more of the mongod output logged
+sleep(500);
diff --git a/jstests/dur/oplog.js b/jstests/dur/oplog.js
new file mode 100755
index 0000000..379c1b6
--- /dev/null
+++ b/jstests/dur/oplog.js
@@ -0,0 +1,159 @@
+/* oplog.js */
+
+var debugging = false;
+var testname = "oplog";
+var step = 1;
+var conn = null;
+
+function checkNoJournalFiles(path, pass) {
+ var files = listFiles(path);
+ if (files.some(function (f) { return f.name.indexOf("prealloc") < 0; })) {
+ if (pass == null) {
+ // wait a bit longer for mongod to potentially finish if it is still running.
+ sleep(10000);
+ return checkNoJournalFiles(path, 1);
+ }
+ print("\n\n\n");
+ print("FAIL path:" + path);
+ print("unexpected files:");
+ printjson(files);
+ assert(false, "FAIL a journal/lsn file is present which is unexpected");
+ }
+}
+
+function runDiff(a, b) {
+ function reSlash(s) {
+ var x = s;
+ if (_isWindows()) {
+ while (1) {
+ var y = x.replace('/', '\\');
+ if (y == x)
+ break;
+ x = y;
+ }
+ }
+ return x;
+ }
+ a = reSlash(a);
+ b = reSlash(b);
+ print("diff " + a + " " + b);
+ return runProgram("diff", a, b);
+}
+
+function log(str) {
+ print();
+ if(str)
+ print(testname+" step " + step++ + " " + str);
+ else
+ print(testname+" step " + step++);
+}
+
+function verify() {
+ log("verify");
+ var d = conn.getDB("local");
+ var mycount = d.oplog.$main.find({ "o.z": 3 }).count();
+ print(mycount);
+ assert(mycount == 3, "oplog doesnt match");
+}
+
+// if you do inserts here, you will want to set _id. otherwise they won't match on different
+// runs so we can't do a binary diff of the resulting files to check they are consistent.
+function work() {
+ log("work");
+ var d = conn.getDB("test");
+ var q = conn.getDB("testq"); // use two db's to exercise JDbContext a bit.
+ d.foo.insert({ _id: 3, x: 22 });
+ d.foo.insert({ _id: 4, x: 22 });
+ q.foo.insert({ _id: 4, x: 22 });
+ d.a.insert({ _id: 3, x: 22, y: [1, 2, 3] });
+ q.a.insert({ _id: 3, x: 22, y: [1, 2, 3] });
+ d.a.insert({ _id: 4, x: 22, y: [1, 2, 3] });
+ d.a.update({ _id: 4 }, { $inc: { x: 1} });
+ // OpCode_ObjCopy fires on larger operations so make one that isn't tiny
+ var big = "axxxxxxxxxxxxxxb";
+ big = big + big;
+ big = big + big;
+ big = big + big;
+ big = big + big;
+ big = big + big;
+ d.foo.insert({ _id: 5, q: "aaaaa", b: big, z: 3 });
+ q.foo.insert({ _id: 5, q: "aaaaa", b: big, z: 3 });
+ d.foo.insert({ _id: 6, q: "aaaaa", b: big, z: 3 });
+ d.foo.update({ _id: 5 }, { $set: { z: 99} });
+
+ // assure writes applied in case we kill -9 on return from this function
+ d.getLastError();
+
+ log("endwork");
+
+ verify();
+}
+
+if( debugging ) {
+ // mongod already running in debugger
+ print("DOING DEBUG MODE BEHAVIOR AS 'db' IS DEFINED -- RUN mongo --nodb FOR REGULAR TEST BEHAVIOR");
+ conn = db.getMongo();
+ work();
+ sleep(30000);
+ quit();
+}
+
+log();
+
+// directories
+var path1 = "/data/db/" + testname+"nodur";
+var path2 = "/data/db/" + testname+"dur";
+
+// non-durable version
+log();
+conn = startMongodEmpty("--port", 30000, "--dbpath", path1, "--nodur", "--smallfiles", "--master", "--oplogSize", 64);
+work();
+stopMongod(30000);
+
+// durable version
+log();
+conn = startMongodEmpty("--port", 30001, "--dbpath", path2, "--dur", "--smallfiles", "--durOptions", /*DurParanoid*/8, "--master", "--oplogSize", 64);
+work();
+
+// wait for group commit.
+printjson(conn.getDB('admin').runCommand({getlasterror:1, fsync:1}));
+
+// kill the process hard
+stopMongod(30001, /*signal*/9);
+
+// journal file should be present, and non-empty as we killed hard
+
+// restart and recover
+log();
+conn = startMongodNoReset("--port", 30002, "--dbpath", path2, "--dur", "--smallfiles", "--durOptions", 8, "--master", "--oplogSize", 64);
+verify();
+
+log("stop");
+stopMongod(30002);
+
+// stopMongod seems to be asynchronous (hmmm) so we sleep here.
+sleep(5000);
+
+// at this point, after clean shutdown, there should be no journal files
+log("check no journal files");
+checkNoJournalFiles(path2 + "/journal");
+
+log("check data matches ns");
+var diff = runDiff(path1 + "/test.ns", path2 + "/test.ns");
+if (diff != "") {
+ print("\n\n\nDIFFERS\n");
+ print(diff);
+}
+assert(diff == "", "error test.ns files differ");
+
+log("check data matches .0");
+diff = runDiff(path1 + "/test.0", path2 + "/test.0");
+if (diff != "") {
+ print("\n\n\nDIFFERS\n");
+ print(diff);
+}
+assert(diff == "", "error test.0 files differ");
+
+log("check data matches done");
+
+print(testname + " SUCCESS");
diff --git a/jstests/error5.js b/jstests/error5.js
index ed8d922..5884d20 100644
--- a/jstests/error5.js
+++ b/jstests/error5.js
@@ -2,7 +2,7 @@
t = db.error5
t.drop();
-assert.throws( function(){ t.save( 4 ); } , "A" );
+assert.throws( function(){ t.save( 4 ); printjson( t.findOne() ) } , null , "A" );
t.save( { a : 1 } )
assert.eq( 1 , t.count() , "B" );
diff --git a/jstests/eval_nolock.js b/jstests/eval_nolock.js
new file mode 100644
index 0000000..2688ec5
--- /dev/null
+++ b/jstests/eval_nolock.js
@@ -0,0 +1,16 @@
+
+t = db.eval_nolock
+t.drop();
+
+for ( i=0; i<10; i++ )
+ t.insert( { _id : i } );
+
+res = db.runCommand( { eval :
+ function(){
+ db.eval_nolock.insert( { _id : 123 } );
+ return db.eval_nolock.count();
+ }
+ , nlock : true } );
+
+assert.eq( 11 , res.retval , "A" )
+
diff --git a/jstests/evalc.js b/jstests/evalc.js
index 59c9467..8a9e889 100644
--- a/jstests/evalc.js
+++ b/jstests/evalc.js
@@ -7,20 +7,6 @@ for( i = 0; i < 10; ++i ) {
// SERVER-1610
-function op() {
- uri = db.runCommand( "whatsmyuri" ).you;
- printjson( uri );
- p = db.currentOp().inprog;
- for ( var i in p ) {
- var o = p[ i ];
- if ( o.client == uri ) {
- print( "found it" );
- return o.opid;
- }
- }
- return -1;
-}
-
s = startParallelShell( "print( 'starting forked:' + Date() ); for ( i=0; i<500000; i++ ){ db.currentOp(); } print( 'ending forked:' + Date() ); " )
print( "starting eval: " + Date() )
diff --git a/jstests/evald.js b/jstests/evald.js
new file mode 100644
index 0000000..78cabb6
--- /dev/null
+++ b/jstests/evald.js
@@ -0,0 +1,68 @@
+t = db.jstests_evald;
+t.drop();
+
+function debug( x ) {
+// printjson( x );
+}
+
+for( i = 0; i < 10; ++i ) {
+ t.save( {i:i} );
+}
+db.getLastError();
+
+function op( ev, where ) {
+ p = db.currentOp().inprog;
+ debug( p );
+ for ( var i in p ) {
+ var o = p[ i ];
+ if ( where ) {
+ if ( o.active && o.query && o.query.query && o.query.query.$where && o.ns == "test.jstests_evald" ) {
+ return o.opid;
+ }
+ } else {
+ if ( o.active && o.query && o.query.$eval && o.query.$eval == ev ) {
+ return o.opid;
+ }
+ }
+ }
+ return -1;
+}
+
+function doIt( ev, wait, where ) {
+
+ if ( where ) {
+ s = startParallelShell( ev );
+ } else {
+ s = startParallelShell( "db.eval( '" + ev + "' )" );
+ }
+
+ o = null;
+ assert.soon( function() { o = op( ev, where ); return o != -1 } );
+
+ if ( wait ) {
+ sleep( 2000 );
+ }
+
+ debug( "going to kill" );
+
+ db.killOp( o );
+
+ debug( "sent kill" );
+
+ s();
+
+}
+
+doIt( "db.jstests_evald.count( { $where: function() { while( 1 ) { ; } } } )", true, true );
+doIt( "db.jstests_evald.count( { $where: function() { while( 1 ) { ; } } } )", false, true );
+doIt( "while( true ) {;}", false );
+doIt( "while( true ) {;}", true );
+
+// the for loops are currently required, as a spawned op masks the parent op - see SERVER-1931
+doIt( "while( 1 ) { for( var i = 0; i < 10000; ++i ) {;} db.jstests_evald.count( {i:10} ); }", true );
+doIt( "while( 1 ) { for( var i = 0; i < 10000; ++i ) {;} db.jstests_evald.count( {i:10} ); }", false );
+doIt( "while( 1 ) { for( var i = 0; i < 10000; ++i ) {;} db.jstests_evald.count(); }", true );
+doIt( "while( 1 ) { for( var i = 0; i < 10000; ++i ) {;} db.jstests_evald.count(); }", false );
+
+doIt( "while( 1 ) { for( var i = 0; i < 10000; ++i ) {;} try { db.jstests_evald.count( {i:10} ); } catch ( e ) { } }", true );
+doIt( "while( 1 ) { try { while( 1 ) { ; } } catch ( e ) { } }", true );
diff --git a/jstests/evale.js b/jstests/evale.js
new file mode 100644
index 0000000..af5a303
--- /dev/null
+++ b/jstests/evale.js
@@ -0,0 +1,5 @@
+t = db.jstests_evale;
+t.drop();
+
+db.eval( function() { return db.jstests_evale.count( { $where:function() { return true; } } ) } );
+db.eval( "db.jstests_evale.count( { $where:function() { return true; } } )" ); \ No newline at end of file
diff --git a/jstests/evalf.js b/jstests/evalf.js
new file mode 100644
index 0000000..12d0192
--- /dev/null
+++ b/jstests/evalf.js
@@ -0,0 +1,26 @@
+// test that killing a parent op interrupts the child op
+
+t = db.jstests_evalf;
+t.drop();
+
+if ( typeof _threadInject == "undefined" ) { // don't run in v8 mode - SERVER-1900
+
+db.eval( function() {
+ opid = null;
+ while( opid == null ) {
+ ops = db.currentOp().inprog;
+ for( i in ops ) {
+ o = ops[ i ];
+ if ( o.active && o.query && o.query.$eval ) {
+ opid = o.opid;
+ }
+ }
+ }
+ db.jstests_evalf.save( {opid:opid} );
+ db.jstests_evalf.count( { $where:function() {
+ db.killOp( db.jstests_evalf.findOne().opid );
+ while( 1 ) { ; }
+ } } );
+ } );
+
+} \ No newline at end of file
diff --git a/jstests/exists.js b/jstests/exists.js
index 28f69e8..3f1e904 100644
--- a/jstests/exists.js
+++ b/jstests/exists.js
@@ -25,7 +25,7 @@ function dotest( n ){
assert.eq( 3, t.count( {'a.b': {$exists:true}} ) , n );
assert.eq( 2, t.count( {'a.b.c': {$exists:true}} ) , n );
assert.eq( 1, t.count( {'a.b.c.d': {$exists:true}} ) , n );
-
+
assert.eq( 1, t.count( {a: {$exists:false}} ) , n );
assert.eq( 2, t.count( {'a.b': {$exists:false}} ) , n );
assert.eq( 3, t.count( {'a.b.c': {$exists:false}} ) , n );
@@ -38,6 +38,7 @@ t.ensureIndex( { "a.b" : 1 } )
t.ensureIndex( { "a.b.c" : 1 } )
t.ensureIndex( { "a.b.c.d" : 1 } )
dotest( "after index" )
+assert.eq( 1, t.find( {a: {$exists:false}} ).hint( {a:1} ).itcount() );
t.drop();
diff --git a/jstests/explain1.js b/jstests/explain1.js
index 6d5ac55..2460c28 100644
--- a/jstests/explain1.js
+++ b/jstests/explain1.js
@@ -20,5 +20,5 @@ assert.eq( 20 , t.find( q ).limit(20).itcount() , "F" );
assert.eq( 49 , t.find(q).explain().n , "G" );
assert.eq( 20 , t.find(q).limit(20).explain().n , "H" );
-assert.eq( 49 , t.find(q).limit(-20).explain().n , "I" );
+assert.eq( 20 , t.find(q).limit(-20).explain().n , "I" );
diff --git a/jstests/explain2.js b/jstests/explain2.js
index 4960e5a..6cb5160 100644
--- a/jstests/explain2.js
+++ b/jstests/explain2.js
@@ -16,12 +16,12 @@ function go( q , c , b , o ){
}
q = { a : { $gt : 3 } }
-go( q , 6 , 7 , 6 );
+go( q , 6 , 6 , 6 );
q.b = 5
-go( q , 1 , 1 , 1 );
+go( q , 1 , 6 , 1 );
delete q.b
q.c = 5
-go( q , 1 , 7 , 6 );
+go( q , 1 , 6 , 6 );
diff --git a/jstests/explain3.js b/jstests/explain3.js
new file mode 100644
index 0000000..69dcac5
--- /dev/null
+++ b/jstests/explain3.js
@@ -0,0 +1,24 @@
+/** SERVER-2451 Kill cursor while explain is yielding */
+
+t = db.jstests_explain3;
+t.drop();
+
+t.ensureIndex( {i:1} );
+for( var i = 0; i < 10000; ++i ) {
+ t.save( {i:i,j:0} );
+}
+db.getLastError();
+
+s = startParallelShell( "sleep( 20 ); db.jstests_explain3.dropIndex( {i:1} );" );
+
+try {
+ t.find( {i:{$gt:-1},j:1} ).hint( {i:1} ).explain()
+} catch (e) {
+ print( "got exception" );
+ printjson( e );
+}
+
+s();
+
+// Sanity check to make sure mongod didn't seg fault.
+assert.eq( 10000, t.count() ); \ No newline at end of file
diff --git a/jstests/find_and_modify3.js b/jstests/find_and_modify3.js
index 1d30204..4214dfb 100644
--- a/jstests/find_and_modify3.js
+++ b/jstests/find_and_modify3.js
@@ -8,13 +8,13 @@ t.insert({_id:2, other:2, comments:[{i:0, j:0}, {i:1, j:1}]});
orig0 = t.findOne({_id:0})
orig2 = t.findOne({_id:2})
-out = t.findAndModify({query: {_id:1, 'comments.i':0}, update: {$set: {'comments.$.j':2}}, 'new': true});
+out = t.findAndModify({query: {_id:1, 'comments.i':0}, update: {$set: {'comments.$.j':2}}, 'new': true, sort:{other:1}});
assert.eq(out.comments[0], {i:0, j:2});
assert.eq(out.comments[1], {i:1, j:1});
assert.eq(t.findOne({_id:0}), orig0);
assert.eq(t.findOne({_id:2}), orig2);
-out = t.findAndModify({query: {other:1, 'comments.i':1}, update: {$set: {'comments.$.j':3}}, 'new': true});
+out = t.findAndModify({query: {other:1, 'comments.i':1}, update: {$set: {'comments.$.j':3}}, 'new': true, sort:{other:1}});
assert.eq(out.comments[0], {i:0, j:2});
assert.eq(out.comments[1], {i:1, j:3});
assert.eq(t.findOne({_id:0}), orig0);
diff --git a/jstests/geo_borders.js b/jstests/geo_borders.js
new file mode 100644
index 0000000..85ffe35
--- /dev/null
+++ b/jstests/geo_borders.js
@@ -0,0 +1,189 @@
+
+t = db.borders
+t.drop()
+
+// FIXME: FAILS for all epsilon < 1
+epsilon = 1
+//epsilon = 0.99
+
+// For these tests, *required* that step ends exactly on max
+min = -1
+max = 1
+step = 1
+numItems = 0;
+
+for(var x = min; x <= max; x += step){
+ for(var y = min; y <= max; y += step){
+ t.insert({ loc: { x : x, y : y } })
+ numItems++;
+ }
+}
+
+overallMin = -1
+overallMax = 1
+
+// Create a point index slightly smaller than the points we have
+t.ensureIndex({ loc : "2d" }, { max : overallMax - epsilon / 2, min : overallMin + epsilon / 2})
+assert(db.getLastError(), "A1")
+
+// FIXME: FAILS for all epsilon < 1
+// Create a point index only slightly bigger than the points we have
+t.ensureIndex({ loc : "2d" }, { max : overallMax + epsilon, min : overallMin - epsilon })
+assert.isnull(db.getLastError(), "A2")
+
+
+
+
+
+
+
+
+//************
+// Box Tests
+//************
+
+
+/*
+// FIXME: Fails w/ non-nice error
+// Make sure we can get all points in full bounds
+assert(numItems == t.find({ loc : { $within : { $box : [[overallMin - epsilon,
+ overallMin - epsilon],
+ [overallMax + epsilon,
+ overallMax + epsilon]] } } }).count(), "B1");
+*/
+
+// Make sure an error is thrown if the bounds are bigger than the box itself
+// TODO: Do we really want an error in this case? Shouldn't we just clip the box?
+try{
+ t.findOne({ loc : { $within : { $box : [[overallMin - 2 * epsilon,
+ overallMin - 2 * epsilon],
+ [overallMax + 2 * epsilon,
+ overallMax + 2 * epsilon]] } } });
+ assert(false, "B2");
+}
+catch(e){}
+
+//Make sure we can get at least close to the bounds of the index
+assert(numItems == t.find({ loc : { $within : { $box : [[overallMin - epsilon / 2,
+ overallMin - epsilon / 2],
+ [overallMax + epsilon / 2,
+ overallMax + epsilon / 2]] } } }).count(), "B3");
+
+
+//**************
+//Circle tests
+//**************
+
+center = (overallMax + overallMin) / 2
+center = [center, center]
+radius = overallMax
+
+offCenter = [center[0] + radius, center[1] + radius]
+onBounds = [offCenter[0] + epsilon, offCenter[1] + epsilon]
+offBounds = [onBounds[0] + epsilon, onBounds[1] + epsilon]
+
+
+//Make sure we can get all points when radius is exactly at full bounds
+assert(0 < t.find({ loc : { $within : { $center : [center, radius + epsilon] } } }).count(), "C1");
+
+//Make sure we can get points when radius is over full bounds
+assert(0 < t.find({ loc : { $within : { $center : [center, radius + 2 * epsilon] } } }).count(), "C2");
+
+//Make sure we can get points when radius is over full bounds, off-centered
+assert(0 < t.find({ loc : { $within : { $center : [offCenter, radius + 2 * epsilon] } } }).count(), "C3");
+
+//Make sure we get correct corner point when center is in bounds
+// (x bounds wrap, so could get other corner)
+cornerPt = t.findOne({ loc : { $within : { $center : [offCenter, step / 2] } } });
+assert(cornerPt.loc.y == overallMax, "C4")
+
+/*
+// FIXME: FAILS, returns opposite corner
+// Make sure we get correct corner point when center is on bounds
+cornerPt = t.findOne({ loc : { $within : { $center : [onBounds,
+ Math.sqrt(2 * epsilon * epsilon) + (step / 2) ] } } });
+assert(cornerPt.loc.y == overallMax, "C5")
+*/
+
+// TODO: Handle gracefully?
+// Make sure we can't get corner point when center is over bounds
+try{
+ t.findOne({ loc : { $within : { $center : [offBounds,
+ Math.sqrt(8 * epsilon * epsilon) + (step / 2) ] } } });
+ assert(false, "C6")
+}
+catch(e){}
+
+
+
+
+
+
+
+//***********
+//Near tests
+//***********
+
+//Make sure we can get all nearby points to point in range
+assert(t.find({ loc : { $near : offCenter } }).next().loc.y == overallMax,
+ "D1");
+
+/*
+// FIXME: FAILS, returns opposite list
+// Make sure we can get all nearby points to point on boundary
+assert(t.find({ loc : { $near : onBounds } }).next().loc.y == overallMax,
+ "D2");
+*/
+
+//TODO: Could this work?
+//Make sure we can't get all nearby points to point over boundary
+try{
+ t.findOne({ loc : { $near : offBounds } })
+ assert(false, "D3")
+}
+catch(e){}
+
+/*
+// FIXME: FAILS, returns only single point
+//Make sure we can get all nearby points within one step (4 points in top corner)
+assert(4 == t.find({ loc : { $near : offCenter, $maxDistance : step * 1.9 } }).count(),
+ "D4");
+*/
+
+
+
+//**************
+//Command Tests
+//**************
+
+
+//Make sure we can get all nearby points to point in range
+assert(db.runCommand({ geoNear : "borders", near : offCenter }).results[0].obj.loc.y == overallMax,
+ "E1");
+
+
+/*
+// FIXME: FAILS, returns opposite list
+//Make sure we can get all nearby points to point on boundary
+assert(db.runCommand({ geoNear : "borders", near : onBounds }).results[0].obj.loc.y == overallMax,
+ "E2");
+*/
+
+//TODO: Could this work?
+//Make sure we can't get all nearby points to point over boundary
+try{
+ db.runCommand({ geoNear : "borders", near : offBounds }).results.length
+ assert(false, "E3")
+}
+catch(e){}
+
+
+/*
+// FIXME: Fails, returns one point
+//Make sure we can get all nearby points within one step (4 points in top corner)
+assert(4 == db.runCommand({ geoNear : "borders", near : offCenter, maxDistance : step * 1.5 }).results.length,
+ "E4");
+*/
+
+
+
diff --git a/jstests/geo_center_sphere1.js b/jstests/geo_center_sphere1.js
new file mode 100644
index 0000000..dd7c98a
--- /dev/null
+++ b/jstests/geo_center_sphere1.js
@@ -0,0 +1,93 @@
+
+t = db.geo_center_sphere1;
+t.drop();
+
+skip = 3 // lower for more rigor, higher for more speed (tested with .5, .678, 1, 2, 3, and 4)
+
+searches = [
+ // x , y rad
+ [ [ 5 , 0 ] , 0.05 ] , // ~200 miles
+ [ [ 135 , 0 ] , 0.05 ] ,
+
+ [ [ 5 , 70 ] , 0.05 ] ,
+ [ [ 135 , 70 ] , 0.05 ] ,
+ [ [ 5 , 85 ] , 0.05 ] ,
+
+ [ [ 20 , 0 ] , 0.25 ] , // ~1000 miles
+ [ [ 20 , -45 ] , 0.25 ] ,
+ [ [ -20 , 60 ] , 0.25 ] ,
+ [ [ -20 , -70 ] , 0.25 ] ,
+];
+correct = searches.map( function(z){ return []; } );
+
+num = 0;
+
+for ( x=-179; x<=179; x += skip ){
+ for ( y=-89; y<=89; y += skip ){
+ o = { _id : num++ , loc : [ x , y ] }
+ t.save( o )
+ for ( i=0; i<searches.length; i++ ){
+ if ( Geo.sphereDistance( [ x , y ] , searches[i][0] ) <= searches[i][1])
+ correct[i].push( o );
+ }
+ }
+ gc(); // needed with low skip values
+}
+
+t.ensureIndex( { loc : "2d" } );
+
+for ( i=0; i<searches.length; i++ ){
+ print('------------');
+ print( tojson( searches[i] ) + "\t" + correct[i].length )
+ q = { loc : { $within : { $centerSphere : searches[i] } } }
+
+ //correct[i].forEach( printjson )
+ //printjson( q );
+ //t.find( q ).forEach( printjson )
+
+ //printjson(t.find( q ).explain())
+
+ //printjson( Array.sort( correct[i].map( function(z){ return z._id; } ) ) )
+ //printjson( Array.sort( t.find(q).map( function(z){ return z._id; } ) ) )
+
+ var numExpected = correct[i].length
+ var x = correct[i].map( function(z){ return z._id; } )
+ var y = t.find(q).map( function(z){ return z._id; } )
+
+ missing = [];
+ epsilon = 0.001; // allow tenth of a percent error due to conversions
+ for (var j=0; j<x.length; j++){
+ if (!Array.contains(y, x[j])){
+ missing.push(x[j]);
+ var obj = t.findOne({_id: x[j]});
+ var dist = Geo.sphereDistance(searches[i][0], obj.loc);
+ print("missing: " + tojson(obj) + " " + dist)
+ if ((Math.abs(dist - searches[i][1]) / dist) < epsilon)
+ numExpected -= 1;
+ }
+ }
+ for (var j=0; j<y.length; j++){
+ if (!Array.contains(x, y[j])){
+ missing.push(y[j]);
+ var obj = t.findOne({_id: y[j]});
+ var dist = Geo.sphereDistance(searches[i][0], obj.loc);
+ print("extra: " + tojson(obj) + " " + dist)
+ if ((Math.abs(dist - searches[i][1]) / dist) < epsilon)
+ numExpected += 1;
+ }
+ }
+
+
+ assert.eq( numExpected , t.find( q ).itcount() , "itcount : " + tojson( searches[i] ) );
+ assert.eq( numExpected , t.find( q ).count() , "count : " + tojson( searches[i] ) );
+ assert.gt( numExpected * 2 , t.find(q).explain().nscanned , "nscanned : " + tojson( searches[i] ) )
+}
+
+
+
+
+
+
+
+
+
diff --git a/jstests/geo_circle2.js b/jstests/geo_circle2.js
index 0232490..ef76884 100644
--- a/jstests/geo_circle2.js
+++ b/jstests/geo_circle2.js
@@ -21,3 +21,6 @@ t.insert({ "uid" : 355844 , "loc" : { "x" : 34 , "y" : -4} ,"categories" : [ "sp
assert.eq( 10 , t.find({ "loc" : { "$within" : { "$center" : [ { "x" : 0 ,"y" : 0} , 50]}} } ).itcount() , "A" );
assert.eq( 6 , t.find({ "loc" : { "$within" : { "$center" : [ { "x" : 0 ,"y" : 0} , 50]}}, "categories" : "sports" } ).itcount() , "B" );
+
+// When not a $near or $within query, geo index should not be used. Fails if geo index is used.
+assert.eq( 1 , t.find({ "loc" : { "x" : -36, "y" : -8}, "categories" : "sports" }).itcount(), "C" )
diff --git a/jstests/geo_circle2a.js b/jstests/geo_circle2a.js
new file mode 100644
index 0000000..1597033
--- /dev/null
+++ b/jstests/geo_circle2a.js
@@ -0,0 +1,36 @@
+// From SERVER-2381
+// Tests to make sure that nested multi-key indexing works for geo indexes and is not used for direct position
+// lookups
+
+db.test.drop()
+db.test.insert({ p : [1112,3473], t : [{ k : 'a', v : 'b' }, { k : 'c', v : 'd' }] })
+db.test.ensureIndex({ p : '2d', 't.k' : 1 }, { min : 0, max : 10000 })
+
+// Succeeds, since a direct lookup should not use the index
+assert(1 == db.test.find({p:[1112,3473],'t.k':'a'}).count(), "A")
+// Succeeds and uses the geo index
+assert(1 == db.test.find({p:{$within:{$box:[[1111,3472],[1113,3475]]}}, 't.k' : 'a' }).count(), "B")
+
+
+db.test.drop()
+db.test.insert({ point:[ 1, 10 ], tags : [ { k : 'key', v : 'value' }, { k : 'key2', v : 123 } ] })
+db.test.insert({ point:[ 1, 10 ], tags : [ { k : 'key', v : 'value' } ] })
+
+db.test.ensureIndex({ point : "2d" , "tags.k" : 1, "tags.v" : 1 })
+
+// Succeeds, since multi-keys should now be looked up correctly
+assert(2 == db.test.find({ point : { $within : { $box : [[0,0],[12,12]] } } }).count(), "C")
+// Succeeds, and should not use geoindex
+assert(2 == db.test.find({ point : [1, 10] }).count(), "D")
+assert(2 == db.test.find({ point : [1, 10], "tags.v" : "value" }).count(), "E")
+assert(1 == db.test.find({ point : [1, 10], "tags.v" : 123 }).count(), "F")
+
+
+db.test.drop()
+db.test.insert({ point:[ 1, 10 ], tags : [ { k : { 'hello' : 'world'}, v : 'value' }, { k : 'key2', v : 123 } ] })
+db.test.insert({ point:[ 1, 10 ], tags : [ { k : 'key', v : 'value' } ] })
+
+db.test.ensureIndex({ point : "2d" , "tags.k" : 1, "tags.v" : 1 })
+
+// Succeeds, should be able to look up the complex element
+assert(1 == db.test.find({ point : { $within : { $box : [[0,0],[12,12]] } }, 'tags.k' : { 'hello' : 'world' } }).count(), "G") \ No newline at end of file
diff --git a/jstests/geo_near_random1.js b/jstests/geo_near_random1.js
new file mode 100644
index 0000000..50539f3
--- /dev/null
+++ b/jstests/geo_near_random1.js
@@ -0,0 +1,12 @@
+// this tests all points using $near
+load("jstests/libs/geo_near_random.js");
+
+var test = new GeoNearRandomTest("geo_near_random1");
+
+test.insertPts(50);
+
+test.testPt([0,0]);
+test.testPt(test.mkPt());
+test.testPt(test.mkPt());
+test.testPt(test.mkPt());
+test.testPt(test.mkPt());
diff --git a/jstests/geo_near_random2.js b/jstests/geo_near_random2.js
new file mode 100644
index 0000000..1673abb
--- /dev/null
+++ b/jstests/geo_near_random2.js
@@ -0,0 +1,21 @@
+// this tests 1% of all points using $near and $nearSphere
+load("jstests/libs/geo_near_random.js");
+
+var test = new GeoNearRandomTest("geo_near_random2");
+
+test.insertPts(5000);
+
+opts = {sphere:0, nToTest:test.nPts*0.01};
+test.testPt([0,0], opts);
+test.testPt(test.mkPt(), opts);
+test.testPt(test.mkPt(), opts);
+test.testPt(test.mkPt(), opts);
+test.testPt(test.mkPt(), opts);
+
+opts.sphere = 1
+test.testPt([0,0], opts);
+test.testPt(test.mkPt(0.8), opts);
+test.testPt(test.mkPt(0.8), opts);
+test.testPt(test.mkPt(0.8), opts);
+test.testPt(test.mkPt(0.8), opts);
+
diff --git a/jstests/geo_sort1.js b/jstests/geo_sort1.js
new file mode 100644
index 0000000..67de80e
--- /dev/null
+++ b/jstests/geo_sort1.js
@@ -0,0 +1,22 @@
+
+t = db.geo_sort1
+t.drop();
+
+for ( x=0; x<10; x++ ){
+ for ( y=0; y<10; y++ ){
+ t.insert( { loc : [ x , y ] , foo : x * x * y } );
+ }
+}
+
+t.ensureIndex( { loc : "2d" , foo : 1 } )
+
+q = t.find( { loc : { $near : [ 5 , 5 ] } , foo : { $gt : 20 } } )
+m = function(z){ return z.foo; }
+
+a = q.clone().map( m );
+b = q.clone().sort( { foo : 1 } ).map( m );
+
+assert.neq( a , b , "A" );
+a.sort();
+b.sort();
+assert.eq( a , b , "B" );
diff --git a/jstests/geo_update1.js b/jstests/geo_update1.js
new file mode 100644
index 0000000..68a8de6
--- /dev/null
+++ b/jstests/geo_update1.js
@@ -0,0 +1,38 @@
+
+t = db.geo_update1
+t.drop()
+
+for(var x = 0; x < 10; x++ ) {
+ for(var y = 0; y < 10; y++ ) {
+ t.insert({"loc": [x, y] , x : x , y : y , z : 1 });
+ }
+}
+
+t.ensureIndex( { loc : "2d" } )
+
+function p(){
+ print( "--------------" );
+ for ( var y=0; y<10; y++ ){
+ var c = t.find( { y : y } ).sort( { x : 1 } )
+ var s = "";
+ while ( c.hasNext() )
+ s += c.next().z + " ";
+ print( s )
+ }
+ print( "--------------" );
+}
+
+p()
+
+t.update({"loc" : {"$within" : {"$center" : [[5,5], 2]}}}, {'$inc' : { 'z' : 1}}, false, true);
+assert.isnull( db.getLastError() , "B1" )
+p()
+
+t.update({}, {'$inc' : { 'z' : 1}}, false, true);
+assert.isnull( db.getLastError() , "B2" )
+p()
+
+
+t.update({"loc" : {"$within" : {"$center" : [[5,5], 2]}}}, {'$inc' : { 'z' : 1}}, false, true);
+assert.isnull( db.getLastError() , "B3" )
+p()
diff --git a/jstests/geo_update2.js b/jstests/geo_update2.js
new file mode 100644
index 0000000..2308b2c
--- /dev/null
+++ b/jstests/geo_update2.js
@@ -0,0 +1,40 @@
+
+t = db.geo_update2
+t.drop()
+
+for(var x = 0; x < 10; x++ ) {
+ for(var y = 0; y < 10; y++ ) {
+ t.insert({"loc": [x, y] , x : x , y : y });
+ }
+}
+
+t.ensureIndex( { loc : "2d" } )
+
+function p(){
+ print( "--------------" );
+ for ( var y=0; y<10; y++ ){
+ var c = t.find( { y : y } ).sort( { x : 1 } )
+ var s = "";
+ while ( c.hasNext() )
+ s += c.next().z + " ";
+ print( s )
+ }
+ print( "--------------" );
+}
+
+p()
+
+
+t.update({"loc" : {"$within" : {"$center" : [[5,5], 2]}}}, {'$inc' : { 'z' : 1}}, false, true);
+assert.isnull( db.getLastError() , "B1" )
+p()
+
+t.update({}, {'$inc' : { 'z' : 1}}, false, true);
+assert.isnull( db.getLastError() , "B2" )
+p()
+
+
+t.update({"loc" : {"$within" : {"$center" : [[5,5], 2]}}}, {'$inc' : { 'z' : 1}}, false, true);
+assert.isnull( db.getLastError() , "B3" )
+p()
+
diff --git a/jstests/geof.js b/jstests/geof.js
new file mode 100644
index 0000000..786ead6
--- /dev/null
+++ b/jstests/geof.js
@@ -0,0 +1,19 @@
+t = db.geof
+t.drop();
+
+// corners (dist ~0.98)
+t.insert({loc: [ 0.7, 0.7]})
+t.insert({loc: [ 0.7, -0.7]})
+t.insert({loc: [-0.7, 0.7]})
+t.insert({loc: [-0.7, -0.7]})
+
+// on x axis (dist == 0.9)
+t.insert({loc: [-0.9, 0]})
+t.insert({loc: [-0.9, 0]})
+
+t.ensureIndex( { loc : "2d" } )
+
+t.find({loc: {$near: [0,0]}}).limit(2).forEach( function(o){
+ //printjson(o);
+ assert.lt(Geo.distance([0,0], o.loc), 0.95);
+});
diff --git a/jstests/group6.js b/jstests/group6.js
index 8d738d4..b77a37a 100644
--- a/jstests/group6.js
+++ b/jstests/group6.js
@@ -29,3 +29,4 @@ for( i = 1; i <= 10; ++i ) {
assert.eq.automsg( "55", "t.group( {key:'y', reduce:function(doc,out){ out.i += doc.i; }, initial:{i:0} } )[ 0 ].i" );
+assert.eq.automsg( "NumberLong(10)", "t.group( {$reduce: function(doc, prev) { prev.count += 1; }, initial: {count: new NumberLong(0) }} )[ 0 ].count" ); \ No newline at end of file
diff --git a/jstests/in3.js b/jstests/in3.js
index 305fb22..b0a8bb7 100644
--- a/jstests/in3.js
+++ b/jstests/in3.js
@@ -8,4 +8,4 @@ assert.eq( {i:[[3,3],[6,6]]}, t.find( {i:{$in:[3,6]}} ).explain().indexBounds ,
for ( var i=0; i<20; i++ )
t.insert( { i : i } );
-assert.eq( 2 , t.find( {i:{$in:[3,6]}} ).explain().nscanned , "B1" )
+assert.eq( 3 , t.find( {i:{$in:[3,6]}} ).explain().nscanned , "B1" )
diff --git a/jstests/in4.js b/jstests/in4.js
index 9aed608..b6c2d10 100644
--- a/jstests/in4.js
+++ b/jstests/in4.js
@@ -27,7 +27,7 @@ checkRanges( {a:[[2,2],[3,3]],b:[[4,10]]}, t.find( {a:{$in:[2,3]},b:{$gt:4,$lt:1
t.save( {a:1,b:1} );
t.save( {a:2,b:4.5} );
t.save( {a:2,b:4} );
-assert.eq.automsg( "1", "t.find( {a:{$in:[2,3]},b:{$in:[4,5]}} ).explain().nscanned" );
+assert.eq.automsg( "2", "t.find( {a:{$in:[2,3]},b:{$in:[4,5]}} ).explain().nscanned" );
assert.eq.automsg( "2", "t.findOne( {a:{$in:[2,3]},b:{$in:[4,5]}} ).a" );
assert.eq.automsg( "4", "t.findOne( {a:{$in:[2,3]},b:{$in:[4,5]}} ).b" );
@@ -41,7 +41,7 @@ assert.eq.automsg( "1", "t.find( {a:2,b:{$in:[3,4]},c:5} ).explain().nscanned" )
t.remove();
t.save( {a:2,b:4,c:5} );
t.save( {a:2,b:4,c:4} );
-assert.eq.automsg( "1", "t.find( {a:2,b:{$in:[3,4]},c:5} ).explain().nscanned" );
+assert.eq.automsg( "2", "t.find( {a:2,b:{$in:[3,4]},c:5} ).explain().nscanned" );
t.drop();
t.ensureIndex( {a:1,b:-1} );
diff --git a/jstests/index11.js b/jstests/index11.js
new file mode 100644
index 0000000..2a552dd
--- /dev/null
+++ b/jstests/index11.js
@@ -0,0 +1,13 @@
+// Reindex w/ field too large to index
+
+coll = db.jstests_index11;
+coll.drop();
+
+coll.ensureIndex({"k": 1, "v": 1});
+coll.insert({k: "x", v: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"});
+assert.eq(0, coll.find({"k": "x"}).count()); // SERVER-1716
+
+coll.dropIndexes();
+coll.ensureIndex({"k": 1, "v": 1});
+
+assert.eq(0, coll.find({"k": "x"}).count());
diff --git a/jstests/index_check6.js b/jstests/index_check6.js
index 240f4cf..d7992a2 100644
--- a/jstests/index_check6.js
+++ b/jstests/index_check6.js
@@ -12,10 +12,12 @@ for ( var age=10; age<50; age++ ){
assert.eq( 10 , t.find( { age : 30 } ).explain().nscanned , "A" );
assert.eq( 20 , t.find( { age : { $gte : 29 , $lte : 30 } } ).explain().nscanned , "B" );
-assert.eq( 12 , t.find( { age : { $gte : 25 , $lte : 30 }, rating: {$in: [0,9] } } ).explain().nscanned , "C1" );
+assert.eq( 18 , t.find( { age : { $gte : 25 , $lte : 30 }, rating: {$in: [0,9] } } ).explain().nscanned , "C1" );
+assert.eq( 23 , t.find( { age : { $gte : 25 , $lte : 30 }, rating: {$in: [0,8] } } ).explain().nscanned , "C2" );
+assert.eq( 28 , t.find( { age : { $gte : 25 , $lte : 30 }, rating: {$in: [1,8] } } ).explain().nscanned , "C3" );
-assert.eq( 2 , t.find( { age : { $gte : 29 , $lte : 30 } , rating : 5 } ).explain().nscanned , "C" ); // SERVER-371
-assert.eq( 4 , t.find( { age : { $gte : 29 , $lte : 30 } , rating : { $gte : 4 , $lte : 5 } } ).explain().nscanned , "D" ); // SERVER-371
+assert.eq( 4 , t.find( { age : { $gte : 29 , $lte : 30 } , rating : 5 } ).explain().nscanned , "C" ); // SERVER-371
+assert.eq( 6 , t.find( { age : { $gte : 29 , $lte : 30 } , rating : { $gte : 4 , $lte : 5 } } ).explain().nscanned , "D" ); // SERVER-371
assert.eq.automsg( "2", "t.find( { age:30, rating:{ $gte:4, $lte:5} } ).explain().nscanned" );
@@ -36,19 +38,30 @@ assert.eq.automsg( "2", "t.find( { a:5, b:5, c:{$gte:5,$lte:6} } ).sort( sort ).
assert.eq.automsg( "1", "t.find( { a:5, b:5, c:{$gte:5.5,$lte:6} } ).sort( sort ).explain().nscanned" );
assert.eq.automsg( "1", "t.find( { a:5, b:5, c:{$gte:5,$lte:5.5} } ).sort( sort ).explain().nscanned" );
assert.eq.automsg( "3", "t.find( { a:5, b:5, c:{$gte:5,$lte:7} } ).sort( sort ).explain().nscanned" );
-assert.eq.automsg( "2", "t.find( { a:5, b:{$gte:5,$lte:6}, c:5 } ).sort( sort ).explain().nscanned" );
-assert.eq.automsg( "1", "t.find( { a:5, b:{$gte:5.5,$lte:6}, c:5 } ).sort( sort ).explain().nscanned" );
-assert.eq.automsg( "1", "t.find( { a:5, b:{$gte:5,$lte:5.5}, c:5 } ).sort( sort ).explain().nscanned" );
-assert.eq.automsg( "3", "t.find( { a:5, b:{$gte:5,$lte:7}, c:5 } ).sort( sort ).explain().nscanned" );
-assert.eq.automsg( "2", "t.find( { a:{$gte:5,$lte:6}, b:5, c:5 } ).sort( sort ).explain().nscanned" );
-assert.eq.automsg( "1", "t.find( { a:{$gte:5.5,$lte:6}, b:5, c:5 } ).sort( sort ).explain().nscanned" );
-assert.eq.automsg( "1", "t.find( { a:{$gte:5,$lte:5.5}, b:5, c:5 } ).sort( sort ).explain().nscanned" );
-assert.eq.automsg( "3", "t.find( { a:{$gte:5,$lte:7}, b:5, c:5 } ).sort( sort ).explain().nscanned" );
-assert.eq.automsg( "4", "t.find( { a:{$gte:5,$lte:6}, b:5, c:{$gte:5,$lte:6} } ).sort( sort ).explain().nscanned" );
-assert.eq.automsg( "2", "t.find( { a:{$gte:5.5,$lte:6}, b:5, c:{$gte:5,$lte:6} } ).sort( sort ).explain().nscanned" );
-assert.eq.automsg( "4", "t.find( { a:5, b:{$gte:5,$lte:6}, c:{$gte:5,$lte:6} } ).sort( sort ).explain().nscanned" );
-assert.eq.automsg( "4", "t.find( { a:{$gte:5,$lte:6}, b:{$gte:5,$lte:6}, c:5 } ).sort( sort ).explain().nscanned" );
-assert.eq.automsg( "8", "t.find( { a:{$gte:5,$lte:6}, b:{$gte:5,$lte:6}, c:{$gte:5,$lte:6} } ).sort( sort ).explain().nscanned" );
+assert.eq.automsg( "4", "t.find( { a:5, b:{$gte:5,$lte:6}, c:5 } ).sort( sort ).explain().nscanned" );
+ if ( s.b > 0 ) {
+ assert.eq.automsg( "2", "t.find( { a:5, b:{$gte:5.5,$lte:6}, c:5 } ).sort( sort ).explain().nscanned" );
+ assert.eq.automsg( "2", "t.find( { a:5, b:{$gte:5,$lte:5.5}, c:5 } ).sort( sort ).explain().nscanned" );
+ } else {
+ assert.eq.automsg( "2", "t.find( { a:5, b:{$gte:5.5,$lte:6}, c:5 } ).sort( sort ).explain().nscanned" );
+ assert.eq.automsg( "2", "t.find( { a:5, b:{$gte:5,$lte:5.5}, c:5 } ).sort( sort ).explain().nscanned" );
+ }
+assert.eq.automsg( "7", "t.find( { a:5, b:{$gte:5,$lte:7}, c:5 } ).sort( sort ).explain().nscanned" );
+assert.eq.automsg( "4", "t.find( { a:{$gte:5,$lte:6}, b:5, c:5 } ).sort( sort ).explain().nscanned" );
+ if ( s.a > 0 ) {
+ assert.eq.automsg( "2", "t.find( { a:{$gte:5.5,$lte:6}, b:5, c:5 } ).sort( sort ).explain().nscanned" );
+ assert.eq.automsg( "2", "t.find( { a:{$gte:5,$lte:5.5}, b:5, c:5 } ).sort( sort ).explain().nscanned" );
+ assert.eq.automsg( "3", "t.find( { a:{$gte:5.5,$lte:6}, b:5, c:{$gte:5,$lte:6} } ).sort( sort ).explain().nscanned" );
+ } else {
+ assert.eq.automsg( "2", "t.find( { a:{$gte:5.5,$lte:6}, b:5, c:5 } ).sort( sort ).explain().nscanned" );
+ assert.eq.automsg( "2", "t.find( { a:{$gte:5,$lte:5.5}, b:5, c:5 } ).sort( sort ).explain().nscanned" );
+ assert.eq.automsg( "3", "t.find( { a:{$gte:5.5,$lte:6}, b:5, c:{$gte:5,$lte:6} } ).sort( sort ).explain().nscanned" );
+ }
+assert.eq.automsg( "7", "t.find( { a:{$gte:5,$lte:7}, b:5, c:5 } ).sort( sort ).explain().nscanned" );
+assert.eq.automsg( "6", "t.find( { a:{$gte:5,$lte:6}, b:5, c:{$gte:5,$lte:6} } ).sort( sort ).explain().nscanned" );
+assert.eq.automsg( "6", "t.find( { a:5, b:{$gte:5,$lte:6}, c:{$gte:5,$lte:6} } ).sort( sort ).explain().nscanned" );
+assert.eq.automsg( "10", "t.find( { a:{$gte:5,$lte:6}, b:{$gte:5,$lte:6}, c:5 } ).sort( sort ).explain().nscanned" );
+assert.eq.automsg( "14", "t.find( { a:{$gte:5,$lte:6}, b:{$gte:5,$lte:6}, c:{$gte:5,$lte:6} } ).sort( sort ).explain().nscanned" );
}
for ( var a = -1; a <= 1; a += 2 ) {
diff --git a/jstests/index_check7.js b/jstests/index_check7.js
index 68102d6..1d0aaeb 100644
--- a/jstests/index_check7.js
+++ b/jstests/index_check7.js
@@ -11,5 +11,5 @@ assert.eq( 1 , t.find( { x : 27 } ).explain().nscanned , "A" )
t.ensureIndex( { x : -1 } )
assert.eq( 1 , t.find( { x : 27 } ).explain().nscanned , "B" )
-assert.eq( 41 , t.find( { x : { $gt : 59 } } ).explain().nscanned , "C" );
+assert.eq( 40 , t.find( { x : { $gt : 59 } } ).explain().nscanned , "C" );
diff --git a/jstests/index_many2.js b/jstests/index_many2.js
index 3fca5f5..f113b8b 100644
--- a/jstests/index_many2.js
+++ b/jstests/index_many2.js
@@ -27,3 +27,5 @@ assert.eq( num - 1 , t.getIndexKeys().length , "B0" )
t.ensureIndex( { z : 1 } )
assert.eq( num , t.getIndexKeys().length , "B1" )
+t.dropIndex( "*" );
+assert.eq( 1 , t.getIndexKeys().length , "C1" )
diff --git a/jstests/index_sparse1.js b/jstests/index_sparse1.js
new file mode 100644
index 0000000..f2805b3
--- /dev/null
+++ b/jstests/index_sparse1.js
@@ -0,0 +1,46 @@
+
+t = db.index_sparse1;
+t.drop();
+
+t.insert( { _id : 1 , x : 1 } )
+t.insert( { _id : 2 , x : 2 } )
+t.insert( { _id : 3 , x : 2 } )
+t.insert( { _id : 4 } )
+t.insert( { _id : 5 } )
+
+assert.eq( 5 , t.count() , "A1" )
+assert.eq( 5 , t.find().sort( { x : 1 } ).itcount() , "A2" )
+
+t.ensureIndex( { x : 1 } )
+assert.eq( 2 , t.getIndexes().length , "B1" )
+assert.eq( 5 , t.find().sort( { x : 1 } ).itcount() , "B2" )
+t.dropIndex( { x : 1 } )
+assert.eq( 1 , t.getIndexes().length , "B3" )
+
+t.ensureIndex( { x : 1 } , { sparse : 1 } )
+assert.eq( 2 , t.getIndexes().length , "C1" )
+assert.eq( 3 , t.find().sort( { x : 1 } ).itcount() , "C2" )
+t.dropIndex( { x : 1 } )
+assert.eq( 1 , t.getIndexes().length , "C3" )
+
+// -- sparse & unique
+
+t.remove( { _id : 2 } )
+
+// test that we can't create a unique index without sparse
+t.ensureIndex( { x : 1 } , { unique : 1 } )
+assert( db.getLastError() , "D1" )
+assert.eq( 1 , t.getIndexes().length , "D2" )
+
+
+t.ensureIndex( { x : 1 } , { unique : 1 , sparse : 1 } )
+assert.eq( 2 , t.getIndexes().length , "E1" )
+t.dropIndex( { x : 1 } )
+assert.eq( 1 , t.getIndexes().length , "E3" )
+
+
+t.insert( { _id : 2 , x : 2 } )
+t.ensureIndex( { x : 1 } , { unique : 1 , sparse : 1 } )
+assert.eq( 1 , t.getIndexes().length , "F1" )
+
+
diff --git a/jstests/index_sparse2.js b/jstests/index_sparse2.js
new file mode 100644
index 0000000..2b16c9d
--- /dev/null
+++ b/jstests/index_sparse2.js
@@ -0,0 +1,21 @@
+t = db.index_sparse2;
+t.drop();
+
+t.insert( { _id : 1 , x : 1 , y : 1 } )
+t.insert( { _id : 2 , x : 2 } )
+t.insert( { _id : 3 } )
+
+t.ensureIndex( { x : 1 , y : 1 } )
+assert.eq( 2 , t.getIndexes().length , "A1" )
+assert.eq( 3 , t.find().sort( { x : 1 , y : 1 } ).itcount() , "A2" )
+t.dropIndex( { x : 1 , y : 1 } )
+assert.eq( 1 , t.getIndexes().length , "A3" )
+
+t.ensureIndex( { x : 1 , y : 1 } , { sparse : 1 } )
+assert.eq( 2 , t.getIndexes().length , "B1" )
+assert.eq( 2 , t.find().sort( { x : 1 , y : 1 } ).itcount() , "B2" )
+t.dropIndex( { x : 1 , y : 1 } )
+assert.eq( 1 , t.getIndexes().length , "B3" )
+
+
+
diff --git a/jstests/indexh.js b/jstests/indexh.js
index c6aad18..ac2a93e 100644
--- a/jstests/indexh.js
+++ b/jstests/indexh.js
@@ -6,11 +6,17 @@ function debug( t ) {
print( t );
}
+function extraDebug() {
+// printjson( db.stats() );
+// db.printCollectionStats();
+}
+
// index extent freeing
t.drop();
t.save( {} );
var s1 = db.stats().dataSize;
debug( "s1: " + s1 );
+extraDebug();
t.ensureIndex( {a:1} );
var s2 = db.stats().dataSize;
debug( "s2: " + s2 );
@@ -18,6 +24,7 @@ assert.automsg( "s1 < s2" );
t.dropIndex( {a:1} );
var s3 = db.stats().dataSize;
debug( "s3: " + s3 );
+extraDebug();
assert.eq.automsg( "s1", "s3" );
// index node freeing
diff --git a/jstests/indexi.js b/jstests/indexi.js
new file mode 100644
index 0000000..b54ffce
--- /dev/null
+++ b/jstests/indexi.js
@@ -0,0 +1,16 @@
+t = db.jstests_indexi;
+
+t.drop();
+
+for( var a = 0; a < 10; ++a ) {
+ for( var b = 0; b < 10; ++b ) {
+ for( var c = 0; c < 10; ++c ) {
+ t.save( {a:a,b:b,c:c} );
+ }
+ }
+}
+
+t.ensureIndex( {a:1,b:1,c:1} );
+t.ensureIndex( {a:1,c:1} );
+
+assert.automsg( "!t.find( {a:{$gt:1,$lt:10},c:{$gt:1,$lt:10}} ).explain().indexBounds.b" ); \ No newline at end of file
diff --git a/jstests/indexj.js b/jstests/indexj.js
new file mode 100644
index 0000000..0d1afc2
--- /dev/null
+++ b/jstests/indexj.js
@@ -0,0 +1,44 @@
+// SERVER-726
+
+t = db.jstests_indexj;
+t.drop();
+
+t.ensureIndex( {a:1} );
+t.save( {a:5} );
+assert.eq( 0, t.find( { a: { $gt:4, $lt:5 } } ).explain().nscanned, "A" );
+
+t.drop();
+t.ensureIndex( {a:1} );
+t.save( {a:4} );
+assert.eq( 0, t.find( { a: { $gt:4, $lt:5 } } ).explain().nscanned, "B" );
+
+t.save( {a:5} );
+assert.eq( 0, t.find( { a: { $gt:4, $lt:5 } } ).explain().nscanned, "D" );
+
+t.save( {a:4} );
+assert.eq( 0, t.find( { a: { $gt:4, $lt:5 } } ).explain().nscanned, "C" );
+
+t.save( {a:5} );
+assert.eq( 0, t.find( { a: { $gt:4, $lt:5 } } ).explain().nscanned, "D" );
+
+t.drop();
+t.ensureIndex( {a:1,b:1} );
+t.save( { a:1,b:1 } );
+t.save( { a:1,b:2 } );
+t.save( { a:2,b:1 } );
+t.save( { a:2,b:2 } );
+
+assert.eq( 2, t.find( { a:{$in:[1,2]}, b:{$gt:1,$lt:2} } ).explain().nscanned );
+assert.eq( 2, t.find( { a:{$in:[1,2]}, b:{$gt:1,$lt:2} } ).sort( {a:-1,b:-1} ).explain().nscanned );
+
+t.save( {a:1,b:1} );
+t.save( {a:1,b:1} );
+assert.eq( 2, t.find( { a:{$in:[1,2]}, b:{$gt:1,$lt:2} } ).explain().nscanned );
+assert.eq( 2, t.find( { a:{$in:[1,2]}, b:{$gt:1,$lt:2} } ).explain().nscanned );
+assert.eq( 2, t.find( { a:{$in:[1,2]}, b:{$gt:1,$lt:2} } ).sort( {a:-1,b:-1} ).explain().nscanned );
+
+assert.eq( 1, t.find( { a:{$in:[1,1.9]}, b:{$gt:1,$lt:2} } ).explain().nscanned );
+assert.eq( 1, t.find( { a:{$in:[1.1,2]}, b:{$gt:1,$lt:2} } ).sort( {a:-1,b:-1} ).explain().nscanned );
+
+t.save( { a:1,b:1.5} );
+assert.eq( 3, t.find( { a:{$in:[1,2]}, b:{$gt:1,$lt:2} } ).explain().nscanned, "F" );
diff --git a/jstests/insert2.js b/jstests/insert2.js
new file mode 100644
index 0000000..442e7dc
--- /dev/null
+++ b/jstests/insert2.js
@@ -0,0 +1,8 @@
+
+t = db.insert2
+t.drop()
+
+assert.isnull( t.findOne() , "A" )
+t.insert( { z : 1 , $inc : { x : 1 } } , true );
+assert.isnull( t.findOne() , "B" )
+
diff --git a/jstests/jni2.js b/jstests/jni2.js
index 221780d..53ad58c 100644
--- a/jstests/jni2.js
+++ b/jstests/jni2.js
@@ -14,8 +14,8 @@ assert.throws( function(){
db.jni2t.save( { y : 1 } );
return 1;
}
- } ).length();
-} , "can't save from $where" );
+ } ).forEach( printjson );
+} , null , "can't save from $where" );
assert.eq( 0 , db.jni2t.find().length() , "B" )
diff --git a/jstests/killop.js b/jstests/killop.js
new file mode 100644
index 0000000..b5e50d9
--- /dev/null
+++ b/jstests/killop.js
@@ -0,0 +1,43 @@
+t = db.jstests_killop
+t.drop();
+
+if ( typeof _threadInject == "undefined" ) { // don't run in v8 mode - SERVER-1900
+
+function debug( x ) {
+// printjson( x );
+}
+
+t.save( {} );
+db.getLastError();
+
+function ops() {
+ p = db.currentOp().inprog;
+ debug( p );
+ ids = [];
+ for ( var i in p ) {
+ var o = p[ i ];
+ if ( o.active && o.query && o.query.query && o.query.query.$where && o.ns == "test.jstests_killop" ) {
+ ids.push( o.opid );
+ }
+ }
+ return ids;
+}
+
+s1 = startParallelShell( "db.jstests_killop.count( { $where: function() { while( 1 ) { ; } } } )" );
+s2 = startParallelShell( "db.jstests_killop.count( { $where: function() { while( 1 ) { ; } } } )" );
+
+o = [];
+assert.soon( function() { o = ops(); return o.length == 2; } );
+debug( o );
+db.killOp( o[ 0 ] );
+db.killOp( o[ 1 ] );
+
+start = new Date();
+
+s1();
+s2();
+
+// don't want to pass if timeout killed the js function
+assert( ( new Date() ) - start < 30000 );
+
+} \ No newline at end of file
diff --git a/jstests/libs/concurrent.js b/jstests/libs/concurrent.js
new file mode 100644
index 0000000..9198818
--- /dev/null
+++ b/jstests/libs/concurrent.js
@@ -0,0 +1,30 @@
+/* NOTE: Requires mongo shell to be built with V8 javascript engine,
+which implements concurrent threads via fork() */
+
+// Fork and start
+function fork_(thunk) {
+ thread = fork(thunk)
+ thread.start()
+ return thread
+}
+
+// In functional form, useful for high-order functions like map in fun.js
+function join_(thread) {thread.join()}
+
+// Fork a loop on each one-arg block and wait for all of them to terminate. Foreground blocks are executed n times, background blocks are executed repeatedly until all foreground loops finish. If any fail, stop all loops and re-raise the exception in the main thread
+function parallel(n, foregroundBlock1s, backgroundBlock1s) {
+ var err = null
+ var stop = false
+ function loop(m) {return function(block1) {return function() {
+ for (var i = 0; i < m; i++) {if (stop) break; block1(i)} }}}
+ function watch(block) {return function() {
+ try {block()} catch(e) {err = e; stop = true}}}
+ foreThunks = map(watch, map(loop(n), foregroundBlock1s))
+ backThunks = map(watch, map(loop(Infinity), backgroundBlock1s))
+ foreThreads = map(fork_, foreThunks)
+ backThreads = map(fork_, backThunks)
+ map(join_, foreThreads)
+ stop = true
+ map(join_, backThreads)
+ if (err != null) throw err
+}
diff --git a/jstests/libs/fun.js b/jstests/libs/fun.js
new file mode 100644
index 0000000..276f32a
--- /dev/null
+++ b/jstests/libs/fun.js
@@ -0,0 +1,32 @@
+// General high-order functions
+
+function forEach (action, array) {
+ for (var i = 0; i < array.length; i++)
+ action (array[i]);
+}
+
+function foldl (combine, base, array) {
+ for (var i = 0; i < array.length; i++)
+ base = combine (base, array[i]);
+ return base
+}
+
+function foldr (combine, base, array) {
+ for (var i = array.length - 1; i >= 0; i--)
+ base = combine (array[i], base);
+ return base
+}
+
+function map (func, array) {
+ var result = [];
+ for (var i = 0; i < array.length; i++)
+ result.push (func (array[i]));
+ return result
+}
+
+function filter (pred, array) {
+ var result = []
+ for (var i = 0; i < array.length; i++)
+ if (pred (array[i])) result.push (array[i]);
+ return result
+}
diff --git a/jstests/libs/geo_near_random.js b/jstests/libs/geo_near_random.js
new file mode 100644
index 0000000..8624ef2
--- /dev/null
+++ b/jstests/libs/geo_near_random.js
@@ -0,0 +1,78 @@
+GeoNearRandomTest = function(name) {
+ this.name = name;
+ this.t = db[name];
+ this.nPts = 0;
+
+ // reset state
+ this.t.drop();
+ Random.srand(1234);
+
+ print("starting test: " + name);
+}
+
+
+GeoNearRandomTest.prototype.mkPt = function mkPt(scale){
+ scale = scale || 1; // scale is good for staying away from edges
+ return [((Random.rand() * 359.8) - 179.9) * scale, ((Random.rand() * 180) - 90) * scale];
+}
+
+GeoNearRandomTest.prototype.insertPts = function(nPts) {
+ assert.eq(this.nPts, 0, "insertPoints already called");
+ this.nPts = nPts;
+
+ for (var i=0; i<nPts; i++){
+ this.t.insert({_id: i, loc: this.mkPt()});
+ }
+
+ this.t.ensureIndex({loc: '2d'});
+}
+
+GeoNearRandomTest.prototype.assertIsPrefix = function(short, long) {
+ for (var i=0; i < short.length; i++){
+ assert.eq(short[i], long[i]);
+ }
+}
+
+GeoNearRandomTest.prototype.testPt = function(pt, opts) {
+ assert.neq(this.nPts, 0, "insertPoints not yet called");
+
+ opts = opts || {};
+ opts['sphere'] = opts['sphere'] || 0;
+ opts['nToTest'] = opts['nToTest'] || this.nPts; // be careful, test is O( N^2 )
+
+ print("testing point: " + tojson(pt) + " opts: " + tojson(opts));
+
+
+ var cmd = {geoNear:this.t.getName(), near: pt, num: 1, spherical:opts.sphere};
+
+ var last = db.runCommand(cmd).results;
+ for (var i=2; i <= opts.nToTest; i++){
+ //print(i); // uncomment to watch status
+ cmd.num = i
+ var ret = db.runCommand(cmd).results;
+
+ try {
+ this.assertIsPrefix(last, ret);
+ } catch (e) {
+ print("*** failed while compairing " + (i-1) + " and " + i);
+ printjson(cmd);
+ throw e; // rethrow
+ }
+
+ last = ret;
+ }
+
+
+ if (!opts.sharded){
+ last = last.map(function(x){return x.obj});
+
+ var query = {loc:{}};
+ query.loc[ opts.sphere ? '$nearSphere' : '$near' ] = pt;
+ var near = this.t.find(query).limit(opts.nToTest).toArray();
+
+ this.assertIsPrefix(last, near);
+ assert.eq(last, near);
+ }
+}
+
+
diff --git a/jstests/libs/grid.js b/jstests/libs/grid.js
new file mode 100644
index 0000000..7aef176
--- /dev/null
+++ b/jstests/libs/grid.js
@@ -0,0 +1,172 @@
+// Grid infrastructure: Servers, ReplicaSets, ConfigSets, Shards, Routers (mongos). Convenient objects and functions on top of those in shell/servers.js -Tony
+
+load('jstests/libs/fun.js')
+load('jstests/libs/network.js')
+
+// New servers and routers take and increment port number from this.
+// A comment containing FreshPorts monad implies reading and incrementing this, IO may also read/increment this.
+var nextPort = 31000
+
+/*** Server is the spec of a mongod, ie. all its command line options.
+ To start a server call 'begin' ***/
+// new Server :: String -> FreshPorts Server
+function Server (name) {
+ this.dbpath = '/data/db/' + name + nextPort
+ this.port = nextPort++
+ this.noprealloc = ''
+ this.smallfiles = ''
+ this.rest = ''
+ this.oplogSize = 8
+}
+
+Server.prototype.addr = '127.0.0.1'
+
+// Server -> String <addr:port>
+Server.prototype.host = function() {
+ return this.addr + ':' + this.port
+}
+
+// Start a new server with this spec and return connection to it
+// Server -> IO Connection
+Server.prototype.begin = function() {
+ return startMongodEmpty(this)
+}
+
+// Stop server and remove db directory
+// Server -> IO ()
+Server.prototype.end = function() {
+ print('Stopping mongod on port ' + this.port)
+ stopMongod (this.port)
+ resetDbpath (this.dbpath)
+}
+
+// Cut server from network so it is unreachable (but still alive)
+// Requires sudo access and ipfw program (Mac OS X and BSD Unix). TODO: use iptables on Linux.
+function cutServer (conn) {
+ var addrport = parseHost (conn.host)
+ cutNetwork (addrport.port)
+}
+
+// Ensure server is connected to network (undo cutServer)
+// Requires sudo access and ipfw program (Mac OS X and BSD Unix). TODO: use iptables on Linux.
+function uncutServer (conn) {
+ var iport = parseHost (conn.host)
+ restoreNetwork (iport.port)
+}
+
+// Kill server process at other end of this connection
+function killServer (conn, _signal) {
+ var signal = _signal || 15
+ var iport = parseHost (conn.host)
+ stopMongod (iport.port, signal)
+}
+
+/*** ReplicaSet is the spec of a replica set, ie. options given to ReplicaSetTest.
+ To start a replica set call 'begin' ***/
+// new ReplicaSet :: String -> Int -> FreshPorts ReplicaSet
+function ReplicaSet (name, numServers) {
+ this.name = name
+ this.host = '127.0.0.1'
+ this.nodes = numServers
+ this.startPort = nextPort
+ this.oplogSize = 40
+ nextPort += numServers
+}
+
+// Start a replica set with this spec and return ReplSetTest, which hold connections to the servers including the master server. Call ReplicaSetTest.stopSet() to end all servers
+// ReplicaSet -> IO ReplicaSetTest
+ReplicaSet.prototype.begin = function() {
+ var rs = new ReplSetTest(this)
+ rs.startSet()
+ rs.initiate()
+ rs.awaitReplication()
+ return rs
+}
+
+// Create a new server and add it to replica set
+// ReplicaSetTest -> IO Connection
+ReplSetTest.prototype.addServer = function() {
+ var conn = this.add()
+ nextPort++
+ this.reInitiate()
+ this.awaitReplication()
+ assert.soon(function() {
+ var doc = conn.getDB('admin').isMaster()
+ return doc['ismaster'] || doc['secondary']
+ })
+ return conn
+}
+
+/*** ConfigSet is a set of specs (Servers) for sharding config servers.
+ Supply either the servers or the number of servers desired.
+ To start the config servers call 'begin' ***/
+// new ConfigSet :: [Server] or Int -> FreshPorts ConfigSet
+function ConfigSet (configSvrsOrNumSvrs) {
+ if (typeof configSvrsOrNumSvrs == 'number') {
+ this.configSvrs = []
+ for (var i = 0; i < configSvrsOrNumSvrs; i++)
+ this.configSvrs.push (new Server ('config'))
+ } else
+ this.configSvrs = configSvrs
+}
+
+// Start config servers, return list of connections to them
+// ConfigSet -> IO [Connection]
+ConfigSet.prototype.begin = function() {
+ return map (function(s) {return s.begin()}, this.configSvrs)
+}
+
+// Stop config servers
+// ConfigSet -> IO ()
+ConfigSet.prototype.end = function() {
+ return map (function(s) {return s.end()}, this.configSvrs)
+}
+
+/*** Router is the spec for a mongos, ie, its command line options.
+ To start a router (mongos) call 'begin' ***/
+// new Router :: ConfigSet -> FreshPorts Router
+function Router (configSet) {
+ this.port = nextPort++
+ this.v = 0
+ this.configdb = map (function(s) {return s.host()}, configSet.configSvrs) .join(',')
+ this.chunkSize = 1
+}
+
+// Start router (mongos) with this spec and return connection to it
+// Router -> IO Connection
+Router.prototype.begin = function() {
+ return startMongos (this)
+}
+
+// Stop router
+// Router -> IO ()
+Router.prototype.end = function() {
+ return stopMongoProgram (this.port)
+}
+
+// Add shard to config via router (mongos) connection. Shard is either a replSet name (replSet.getURL()) or single server (server.host)
+// Connection -> String -> IO ()
+function addShard (routerConn, repSetOrHostName) {
+ var ack = routerConn.getDB('admin').runCommand ({addshard: repSetOrHostName})
+ assert (ack['ok'], tojson(ack))
+}
+
+// Connection -> String -> IO ()
+function enableSharding (routerConn, dbName) {
+ var ack = routerConn.getDB('admin').runCommand ({enablesharding: dbName})
+ assert (ack['ok'], tojson(ack))
+}
+
+// Connection -> String -> String -> String -> IO ()
+function shardCollection (routerConn, dbName, collName, shardKey) {
+ var ack = routerConn.getDB('admin').runCommand ({shardcollection: dbName + '.' + collName, key: shardKey})
+ assert (ack['ok'], tojson(ack))
+}
+
+// Move db from its current primary shard to given shard. Shard is either a replSet name (replSet.getURL()) or single server (server.host)
+// Connection -> String -> String -> IO ()
+function moveDB (routerConn, dbname, repSetOrHostName) {
+ var ack = routerConn.getDB('admin').runCommand ({moveprimary: dbname, to: repSetOrHostName})
+ printjson(ack)
+ assert (ack['ok'], tojson(ack))
+}
diff --git a/jstests/libs/network.js b/jstests/libs/network.js
new file mode 100644
index 0000000..e5b33f3
--- /dev/null
+++ b/jstests/libs/network.js
@@ -0,0 +1,37 @@
+
+// Parse "127.0.0.1:300" into {addr: "127.0.0.1", port: 300},
+// and "127.0.0.1" into {addr: "127.0.0.1", port: NaN}
+function parseHost (hostString) {
+ var items = hostString.match(/(\d+.\d+.\d+.\d+)(:(\d+))?/)
+ return {addr: items[1], port: parseInt(items[3])}
+}
+
+
+/* Network traffic shaping (packet dropping) to simulate network problems
+ Currently works on BSD Unix and Mac OS X only (using ipfw).
+ Requires sudo access.
+ TODO: make it work on Linux too (using iptables). */
+
+var nextRuleNum = 100 // this grows indefinitely but can't exceed 65534, so can't call routines below indefinitely
+var portRuleNum = {}
+
+// Cut network connection to local port by dropping packets using ipfw
+function cutNetwork (port) {
+ portRuleNum[port] = nextRuleNum
+ runProgram ('sudo', 'ipfw', 'add ' + nextRuleNum++ + ' deny tcp from any to any ' + port)
+ runProgram ('sudo', 'ipfw', 'add ' + nextRuleNum++ + ' deny tcp from any ' + port + ' to any')
+ //TODO: confirm it worked (since sudo may not work)
+ runProgram ('sudo', 'ipfw', 'show')
+}
+
+// Restore network connection to local port by not dropping packets using ipfw
+function restoreNetwork (port) {
+ var ruleNum = portRuleNum[port]
+ if (ruleNum) {
+ runProgram ('sudo', 'ipfw', 'delete ' + ruleNum++)
+ runProgram ('sudo', 'ipfw', 'delete ' + ruleNum)
+ delete portRuleNum[port]
+ }
+ //TODO: confirm it worked (since sudo may not work)
+ runProgram ('sudo', 'ipfw', 'show')
+}
diff --git a/jstests/misc/biginsert.js b/jstests/misc/biginsert.js
new file mode 100755
index 0000000..ebbdc18
--- /dev/null
+++ b/jstests/misc/biginsert.js
@@ -0,0 +1,18 @@
+o = "xxxxxxxxxxxxxxxxxxx";
+o = o + o;
+o + o;
+o = o + o;
+o = o + o;
+o = o + o;
+
+var B = 40000;
+var last = new Date();
+for (i = 0; i < 30000000; i++) {
+ db.foo.insert({ o: o });
+ if (i % B == 0) {
+ var n = new Date();
+ print(i);
+ print("per sec: " + B*1000 / (n - last));
+ last = n;
+ }
+}
diff --git a/jstests/mr1.js b/jstests/mr1.js
index aacd69b..dc81534 100644
--- a/jstests/mr1.js
+++ b/jstests/mr1.js
@@ -49,7 +49,7 @@ r2 = function( key , values ){
return total;
};
-res = db.runCommand( { mapreduce : "mr1" , map : m , reduce : r } );
+res = db.runCommand( { mapreduce : "mr1" , map : m , reduce : r , out : "mr1_out" } );
d( res );
if ( ks == "_id" ) assert( res.ok , "not ok" );
assert.eq( 4 , res.counts.input , "A" );
@@ -66,7 +66,7 @@ assert.eq( 3 , z.b , "E" );
assert.eq( 3 , z.c , "F" );
x.drop();
-res = db.runCommand( { mapreduce : "mr1" , map : m , reduce : r , query : { x : { "$gt" : 2 } } } );
+res = db.runCommand( { mapreduce : "mr1" , map : m , reduce : r , query : { x : { "$gt" : 2 } } , out : "mr1_out" } );
d( res );
assert.eq( 2 , res.counts.input , "B" );
x = db[res.result];
@@ -77,7 +77,7 @@ assert.eq( 1 , z.b , "C2" );
assert.eq( 2 , z.c , "C3" );
x.drop();
-res = db.runCommand( { mapreduce : "mr1" , map : m2 , reduce : r2 , query : { x : { "$gt" : 2 } } } );
+res = db.runCommand( { mapreduce : "mr1" , map : m2 , reduce : r2 , query : { x : { "$gt" : 2 } } , out : "mr1_out" } );
d( res );
assert.eq( 2 , res.counts.input , "B" );
x = db[res.result];
@@ -104,7 +104,7 @@ for ( i=5; i<1000; i++ ){
t.save( { x : i , tags : [ "b" , "d" ] } );
}
-res = db.runCommand( { mapreduce : "mr1" , map : m , reduce : r } );
+res = db.runCommand( { mapreduce : "mr1" , map : m , reduce : r , out : "mr1_out" } );
d( res );
assert.eq( 999 , res.counts.input , "Z1" );
x = db[res.result];
@@ -125,12 +125,12 @@ assert.eq( 995 , getk( "d" ).value.count , "ZD" );
x.drop();
if ( true ){
- printjson( db.runCommand( { mapreduce : "mr1" , map : m , reduce : r , verbose : true } ) );
+ printjson( db.runCommand( { mapreduce : "mr1" , map : m , reduce : r , verbose : true , out : "mr1_out" } ) );
}
print( "t1: " + Date.timeFunc(
function(){
- var out = db.runCommand( { mapreduce : "mr1" , map : m , reduce : r } );
+ var out = db.runCommand( { mapreduce : "mr1" , map : m , reduce : r , out : "mr1_out" } );
if ( ks == "_id" ) assert( out.ok , "XXX : " + tojson( out ) );
db[out.result].drop();
} , 10 ) + " (~500 on 2.8ghz) - itcount: " + Date.timeFunc( function(){ db.mr1.find().itcount(); } , 10 ) );
@@ -138,7 +138,7 @@ print( "t1: " + Date.timeFunc(
// test doesn't exist
-res = db.runCommand( { mapreduce : "lasjdlasjdlasjdjasldjalsdj12e" , map : m , reduce : r } );
+res = db.runCommand( { mapreduce : "lasjdlasjdlasjdjasldjalsdj12e" , map : m , reduce : r , out : "mr1_out" } );
assert( ! res.ok , "should be not ok" );
if ( true ){
@@ -166,11 +166,15 @@ if ( true ){
}
x.drop();
- res = db.runCommand( { mapreduce : "mr1" , out : "mr1_foo" , map : m2 , reduce : r2 } );
+ res = db.runCommand( { mapreduce : "mr1" , out : "mr1_foo" , map : m2 , reduce : r2 , out : "mr1_out" } );
d(res);
print( "t3: " + res.timeMillis + " (~3500 on 2.8ghz)" );
+
+ res = db.runCommand( { mapreduce : "mr1" , map : m2 , reduce : r2 , out : { inline : true } } );
+ print( "t4: " + res.timeMillis );
+
}
-res = db.runCommand( { mapreduce : "mr1" , map : m , reduce : r } );
+res = db.runCommand( { mapreduce : "mr1" , map : m , reduce : r , out : "mr1_out" } );
assert( res.ok , "should be ok" );
diff --git a/jstests/mr2.js b/jstests/mr2.js
index 0a8e9d6..709c305 100644
--- a/jstests/mr2.js
+++ b/jstests/mr2.js
@@ -29,7 +29,12 @@ function r( who , values ){
function reformat( r ){
var x = {};
- r.find().forEach(
+ var cursor;
+ if ( r.results )
+ cursor = r.results;
+ else
+ cursor = r.find();
+ cursor.forEach(
function(z){
x[z._id] = z.value;
}
@@ -41,10 +46,22 @@ function f( who , res ){
res.avg = res.totalSize / res.num;
return res;
}
-res = t.mapReduce( m , r , { finalize : f } );
+
+res = t.mapReduce( m , r , { finalize : f , out : "mr2_out" } );
+printjson( res )
x = reformat( res );
-assert.eq( 9 , x.a.avg , "A" );
-assert.eq( 16 , x.b.avg , "B" );
-assert.eq( 18 , x.c.avg , "C" );
+assert.eq( 9 , x.a.avg , "A1" );
+assert.eq( 16 , x.b.avg , "A2" );
+assert.eq( 18 , x.c.avg , "A3" );
res.drop();
+res = t.mapReduce( m , r , { finalize : f , out : { inline : 1 } } );
+printjson( res )
+x = reformat( res );
+assert.eq( 9 , x.a.avg , "B1" );
+assert.eq( 16 , x.b.avg , "B2" );
+assert.eq( 18 , x.c.avg , "B3" );
+res.drop();
+
+assert( ! ( "result" in res ) , "B4" )
+
diff --git a/jstests/mr3.js b/jstests/mr3.js
index e7d1f2c..3b0a918 100644
--- a/jstests/mr3.js
+++ b/jstests/mr3.js
@@ -25,7 +25,7 @@ r = function( key , values ){
return { count : total };
};
-res = t.mapReduce( m , r );
+res = t.mapReduce( m , r , { out : "mr3_out" } );
z = res.convertToSingleObject()
assert.eq( 3 , Object.keySet( z ).length , "A1" );
@@ -35,7 +35,7 @@ assert.eq( 3 , z.c.count , "A4" );
res.drop();
-res = t.mapReduce( m , r , { mapparams : [ 2 , 2 ] } );
+res = t.mapReduce( m , r , { out : "mr3_out" , mapparams : [ 2 , 2 ] } );
z = res.convertToSingleObject()
assert.eq( 3 , Object.keySet( z ).length , "B1" );
@@ -52,7 +52,7 @@ realm = m;
m = function(){
emit( this._id , 1 );
}
-res = t.mapReduce( m , r );
+res = t.mapReduce( m , r , { out : "mr3_out" } );
res.drop();
m = function(){
@@ -60,7 +60,7 @@ m = function(){
}
before = db.getCollectionNames().length;
-assert.throws( function(){ t.mapReduce( m , r ); } );
+assert.throws( function(){ t.mapReduce( m , r , { out : "mr3_out" } ); } );
assert.eq( before , db.getCollectionNames().length , "after throw crap" );
@@ -69,5 +69,5 @@ r = function( k , v ){
return v.x.x.x;
}
before = db.getCollectionNames().length;
-assert.throws( function(){ t.mapReduce( m , r ); } );
+assert.throws( function(){ t.mapReduce( m , r , "mr3_out" ) } )
assert.eq( before , db.getCollectionNames().length , "after throw crap" );
diff --git a/jstests/mr4.js b/jstests/mr4.js
index b14cdfe..78c8bce 100644
--- a/jstests/mr4.js
+++ b/jstests/mr4.js
@@ -23,7 +23,7 @@ r = function( key , values ){
return { count : total };
};
-res = t.mapReduce( m , r , { scope : { xx : 1 } } );
+res = t.mapReduce( m , r , { out : "mr4_out" , scope : { xx : 1 } } );
z = res.convertToSingleObject()
assert.eq( 3 , Object.keySet( z ).length , "A1" );
@@ -34,7 +34,7 @@ assert.eq( 3 , z.c.count , "A4" );
res.drop();
-res = t.mapReduce( m , r , { scope : { xx : 2 } } );
+res = t.mapReduce( m , r , { scope : { xx : 2 } , out : "mr4_out" } );
z = res.convertToSingleObject()
assert.eq( 3 , Object.keySet( z ).length , "A1" );
diff --git a/jstests/mr5.js b/jstests/mr5.js
index bbac3fe..50a63d1 100644
--- a/jstests/mr5.js
+++ b/jstests/mr5.js
@@ -25,7 +25,7 @@ r = function( k , v ){
return { stats : stats , total : total }
}
-res = t.mapReduce( m , r , { scope : { xx : 1 } } );
+res = t.mapReduce( m , r , { out : "mr5_out" , scope : { xx : 1 } } );
//res.find().forEach( printjson )
z = res.convertToSingleObject()
@@ -44,7 +44,7 @@ m = function(){
-res = t.mapReduce( m , r , { scope : { xx : 1 } } );
+res = t.mapReduce( m , r , { out : "mr5_out" , scope : { xx : 1 } } );
//res.find().forEach( printjson )
z = res.convertToSingleObject()
diff --git a/jstests/mr_bigobject.js b/jstests/mr_bigobject.js
index 8224209..4466b8d 100644
--- a/jstests/mr_bigobject.js
+++ b/jstests/mr_bigobject.js
@@ -3,11 +3,11 @@ t = db.mr_bigobject
t.drop()
s = "";
-while ( s.length < ( 1024 * 1024 ) ){
+while ( s.length < ( 6 * 1024 * 1024 ) ){
s += "asdasdasd";
}
-for ( i=0; i<10; i++ )
+for ( i=0; i<5; i++ )
t.insert( { _id : i , s : s } )
m = function(){
@@ -18,13 +18,14 @@ r = function( k , v ){
return 1;
}
-assert.throws( function(){ t.mapReduce( m , r ); } , "emit should fail" )
+assert.throws( function(){ r = t.mapReduce( m , r , "mr_bigobject_out" ); } , null , "emit should fail" )
+
m = function(){
emit( 1 , this.s );
}
-assert.eq( { 1 : 1 } , t.mapReduce( m , r ).convertToSingleObject() , "A1" )
+assert.eq( { 1 : 1 } , t.mapReduce( m , r , "mr_bigobject_out" ).convertToSingleObject() , "A1" )
r = function( k , v ){
total = 0;
@@ -38,4 +39,6 @@ r = function( k , v ){
return total;
}
-assert.eq( { 1 : 10 * s.length } , t.mapReduce( m , r ).convertToSingleObject() , "A1" )
+assert.eq( { 1 : t.count() * s.length } , t.mapReduce( m , r , "mr_bigobject_out" ).convertToSingleObject() , "A1" )
+
+t.drop()
diff --git a/jstests/mr_comments.js b/jstests/mr_comments.js
new file mode 100644
index 0000000..f6a0699
--- /dev/null
+++ b/jstests/mr_comments.js
@@ -0,0 +1,28 @@
+
+t = db.mr_comments
+t.drop()
+
+t.insert( { foo : 1 } )
+t.insert( { foo : 1 } )
+t.insert( { foo : 2 } )
+
+res = db.runCommand(
+ { mapreduce : "mr_comments",
+ map : "// This will fail\n\n // Emit some stuff\n emit(this.foo, 1)\n",
+ reduce : function(key, values){
+ return Array.sum(values);
+ },
+ out: "mr_comments_out"
+ });
+assert.eq( 3 , res.counts.emit )
+
+res = db.runCommand(
+ { mapreduce : "mr_comments",
+ map : "// This will fail\nfunction(){\n // Emit some stuff\n emit(this.foo, 1)\n}\n",
+ reduce : function(key, values){
+ return Array.sum(values);
+ },
+ out: "mr_comments_out"
+ });
+
+assert.eq( 3 , res.counts.emit )
diff --git a/jstests/mr_errorhandling.js b/jstests/mr_errorhandling.js
index 57724f1..c4e1137 100644
--- a/jstests/mr_errorhandling.js
+++ b/jstests/mr_errorhandling.js
@@ -24,7 +24,7 @@ r = function( k , v ){
return total;
}
-res = t.mapReduce( m_good , r );
+res = t.mapReduce( m_good , r , "mr_errorhandling_out" );
assert.eq( { 1 : 1 , 2 : 2 , 3 : 2 , 4 : 1 } , res.convertToSingleObject() , "A" );
res.drop()
@@ -32,7 +32,7 @@ res = null;
theerror = null;
try {
- res = t.mapReduce( m_bad , r );
+ res = t.mapReduce( m_bad , r , "mr_errorhandling_out" );
}
catch ( e ){
theerror = e.toString();
@@ -42,6 +42,8 @@ assert( theerror , "B2" );
assert( theerror.indexOf( "emit" ) >= 0 , "B3" );
// test things are still in an ok state
-res = t.mapReduce( m_good , r );
+res = t.mapReduce( m_good , r , "mr_errorhandling_out" );
assert.eq( { 1 : 1 , 2 : 2 , 3 : 2 , 4 : 1 } , res.convertToSingleObject() , "A" );
res.drop()
+
+assert.throws( function(){ t.mapReduce( m_good , r , { out : "xxx" , query : "foo" } ); } )
diff --git a/jstests/mr_index.js b/jstests/mr_index.js
new file mode 100644
index 0000000..521d44d
--- /dev/null
+++ b/jstests/mr_index.js
@@ -0,0 +1,43 @@
+
+t = db.mr_index
+t.drop()
+
+outName = "mr_index_out"
+out = db[outName]
+out.drop()
+
+t.insert( { tags : [ 1 ] } )
+t.insert( { tags : [ 1 , 2 ] } )
+t.insert( { tags : [ 1 , 2 , 3 ] } )
+t.insert( { tags : [ 3 ] } )
+t.insert( { tags : [ 2 , 3 ] } )
+t.insert( { tags : [ 2 , 3 ] } )
+t.insert( { tags : [ 1 , 2 ] } )
+
+m = function(){
+ for ( i=0; i<this.tags.length; i++ )
+ emit( this.tags[i] , 1 );
+}
+
+r = function( k , vs ){
+ return Array.sum( vs );
+}
+
+ex = function(){
+ return out.find().sort( { value : 1 } ).explain()
+}
+
+res = t.mapReduce( m , r , { out : outName } )
+
+assert.eq( "BasicCursor" , ex().cursor , "A1" )
+out.ensureIndex( { value : 1 } )
+assert.eq( "BtreeCursor value_1" , ex().cursor , "A2" )
+assert.eq( 3 , ex().n , "A3" )
+
+res = t.mapReduce( m , r , { out : outName } )
+
+assert.eq( "BtreeCursor value_1" , ex().cursor , "B1" )
+assert.eq( 3 , ex().n , "B2" )
+res.drop()
+
+
diff --git a/jstests/mr_index2.js b/jstests/mr_index2.js
new file mode 100644
index 0000000..a8d845e
--- /dev/null
+++ b/jstests/mr_index2.js
@@ -0,0 +1,22 @@
+
+t = db.mr_index2;
+t.drop()
+
+t.save( { arr : [1, 2] } )
+
+map = function() { emit(this._id, 1) }
+reduce = function(k,vals) { return Array.sum( vals ); }
+
+res = t.mapReduce(map,reduce, { out : "mr_index2_out" , query : {} })
+assert.eq( 1 ,res.counts.input , "A" )
+res.drop()
+
+res = t.mapReduce(map,reduce, { out : "mr_index2_out" , query : { arr: {$gte:0} } })
+assert.eq( 1 ,res.counts.input , "B" )
+res.drop()
+
+t.ensureIndex({arr:1})
+res = t.mapReduce(map,reduce, { out : "mr_index2_out" , query : { arr: {$gte:0} } })
+assert.eq( 1 ,res.counts.input , "C" )
+res.drop();
+
diff --git a/jstests/mr_index3.js b/jstests/mr_index3.js
new file mode 100644
index 0000000..0607cc8
--- /dev/null
+++ b/jstests/mr_index3.js
@@ -0,0 +1,50 @@
+
+t = db.mr_index3
+t.drop();
+
+t.insert( { _id : 1, name : 'name1', tags : ['dog', 'cat'] } );
+t.insert( { _id : 2, name : 'name2', tags : ['cat'] } );
+t.insert( { _id : 3, name : 'name3', tags : ['mouse', 'cat', 'dog'] } );
+t.insert( { _id : 4, name : 'name4', tags : [] } );
+
+m = function(){
+ for ( var i=0; i<this.tags.length; i++ )
+ emit( this.tags[i] , 1 )
+};
+
+r = function( key , values ){
+ return Array.sum( values );
+};
+
+a1 = db.runCommand({ mapreduce : 'mr_index3', map : m, reduce : r , out : { inline : true } } ).results
+a2 = db.runCommand({ mapreduce : 'mr_index3', map : m, reduce : r, query: {name : 'name1'} , out : { inline : true }}).results
+a3 = db.runCommand({ mapreduce : 'mr_index3', map : m, reduce : r, query: {name : {$gt:'name'} } , out : { inline : true }}).results
+
+assert.eq( [
+ {
+ "_id" : "cat",
+ "value" : 3
+ },
+ {
+ "_id" : "dog",
+ "value" : 2
+ },
+ {
+ "_id" : "mouse",
+ "value" : 1
+ }
+] , a1 , "A1" );
+assert.eq( [ { "_id" : "cat", "value" : 1 }, { "_id" : "dog", "value" : 1 } ] , a2 , "A2" )
+assert.eq( a1 , a3 , "A3" )
+
+t.ensureIndex({name:1, tags:1});
+
+b1 = db.runCommand({ mapreduce : 'mr_index3', map : m, reduce : r , out : { inline : true } } ).results
+b2 = db.runCommand({ mapreduce : 'mr_index3', map : m, reduce : r, query: {name : 'name1'} , out : { inline : true }}).results
+b3 = db.runCommand({ mapreduce : 'mr_index3', map : m, reduce : r, query: {name : {$gt:'name'} } , out : { inline : true }}).results
+
+assert.eq( a1 , b1 , "AB1" )
+assert.eq( a2 , b2 , "AB2" )
+assert.eq( a3 , b3 , "AB3" )
+
+
diff --git a/jstests/mr_killop.js b/jstests/mr_killop.js
new file mode 100644
index 0000000..899997d
--- /dev/null
+++ b/jstests/mr_killop.js
@@ -0,0 +1,127 @@
+t = db.jstests_mr_killop;
+t.drop();
+t2 = db.jstests_mr_killop_out;
+t2.drop();
+
+if ( typeof _threadInject == "undefined" ) { // don't run in v8 mode - SERVER-1900
+
+ function debug( x ) {
+// printjson( x );
+ }
+
+ /** @return op code for map reduce op created by spawned shell, or that op's child */
+ function op( where ) {
+ p = db.currentOp().inprog;
+ debug( p );
+ for ( var i in p ) {
+ var o = p[ i ];
+ if ( where ) {
+ if ( o.active && o.ns == "test.jstests_mr_killop" && o.query && o.query.$where ) {
+ return o.opid;
+ }
+ } else {
+ if ( o.active && o.query && o.query.mapreduce && o.query.mapreduce == "jstests_mr_killop" ) {
+ return o.opid;
+ }
+ }
+ }
+ return -1;
+ }
+
+ /**
+ * Run one map reduce with the specified parameters in a parallel shell, kill the
+ * map reduce op or its child op with killOp, and wait for the map reduce op to
+ * terminate.
+ * @where - if true, a count $where op is killed rather than the map reduce op.
+ * This is necessary for a child count $where of a map reduce op because child
+ * ops currently mask parent ops in currentOp.
+ */
+ function testOne( map, reduce, finalize, scope, where, wait ) {
+ t.drop();
+ t2.drop();
+ // Ensure we have 2 documents for the reduce to run
+ t.save( {a:1} );
+ t.save( {a:1} );
+ db.getLastError();
+
+ spec =
+ {
+ mapreduce:"jstests_mr_killop",
+ out:"jstests_mr_killop_out",
+ map: map,
+ reduce: reduce
+ };
+ if ( finalize ) {
+ spec[ "finalize" ] = finalize;
+ }
+ if ( scope ) {
+ spec[ "scope" ] = scope;
+ }
+
+ // Windows shell strips all double quotes from command line, so use
+ // single quotes.
+ stringifiedSpec = tojson( spec ).toString().replace( /\n/g, ' ' ).replace( /\"/g, "\'" );
+
+ // The assert below won't be caught by this test script, but it will cause error messages
+ // to be printed.
+ s = startParallelShell( "assert.commandWorked( db.runCommand( " + stringifiedSpec + " ) );" );
+
+ if ( wait ) {
+ sleep( 2000 );
+ }
+
+ o = null;
+ assert.soon( function() { o = op( where ); return o != -1 } );
+
+ db.killOp( o );
+ debug( "did kill" );
+
+ // When the map reduce op is killed, the spawned shell will exit
+ s();
+ debug( "parallel shell completed" );
+
+ assert.eq( -1, op( where ) );
+ }
+
+ /** Test using wait and non wait modes */
+ function test( map, reduce, finalize, scope, where ) {
+ testOne( map, reduce, finalize, scope, where, false );
+ testOne( map, reduce, finalize, scope, where, true );
+ }
+
+ /** Test looping in map and reduce functions */
+ function runMRTests( loop, where ) {
+ test( loop, function( k, v ) { return v[ 0 ]; }, null, null, where );
+ test( function() { emit( this.a, 1 ); }, loop, null, null, where );
+ test( function() { loop(); }, function( k, v ) { return v[ 0 ] }, null, { loop: loop }, where );
+ }
+
+ /** Test looping in finalize function */
+ function runFinalizeTests( loop, where ) {
+ test( function() { emit( this.a, 1 ); }, function( k, v ) { return v[ 0 ] }, loop, null, where );
+ test( function() { emit( this.a, 1 ); }, function( k, v ) { return v[ 0 ] }, function( a, b ) { loop() }, { loop: loop }, where );
+ }
+
+ var loop = function() {
+ while( 1 ) {
+ ;
+ }
+ }
+ runMRTests( loop, false );
+ runFinalizeTests( loop, false );
+
+ var loop = function() {
+ while( 1 ) {
+ db.jstests_mr_killop.count( { a:1 } );
+ }
+ }
+ runMRTests( loop, false );
+ // db can't be accessed from finalize() so not running that test
+
+ /** Test that we can kill the child op of a map reduce op */
+ var loop = function() {
+ db.jstests_mr_killop.find( {$where:function() { while( 1 ) { ; } }} ).toArray();
+ }
+ runMRTests( loop, true );
+
+}
diff --git a/jstests/mr_merge.js b/jstests/mr_merge.js
new file mode 100644
index 0000000..c008ebb
--- /dev/null
+++ b/jstests/mr_merge.js
@@ -0,0 +1,51 @@
+
+t = db.mr_merge;
+t.drop();
+
+t.insert( { a : [ 1 , 2 ] } )
+t.insert( { a : [ 2 , 3 ] } )
+t.insert( { a : [ 3 , 4 ] } )
+
+outName = "mr_merge_out";
+out = db[outName];
+out.drop();
+
+m = function(){ for (i=0; i<this.a.length; i++ ) emit( this.a[i] , 1 ); }
+r = function(k,vs){ return Array.sum( vs ); }
+
+function tos( o ){
+ var s = "";
+ for ( var i=0; i<100; i++ ){
+ if ( o[i] )
+ s += i + "_" + o[i];
+ }
+ return s;
+}
+
+
+res = t.mapReduce( m , r , { out : outName } )
+
+
+expected = { "1" : 1 , "2" : 2 , "3" : 2 , "4" : 1 }
+assert.eq( tos( expected ) , tos( res.convertToSingleObject() ) , "A" );
+
+t.insert( { a : [ 4 , 5 ] } )
+out.insert( { _id : 10 , value : "5" } )
+res = t.mapReduce( m , r , { out : outName } )
+
+expected["4"]++;
+expected["5"] = 1
+assert.eq( tos( expected ) , tos( res.convertToSingleObject() ) , "B" );
+
+t.insert( { a : [ 5 , 6 ] } )
+out.insert( { _id : 10 , value : "5" } )
+res = t.mapReduce( m , r , { out : { merge : outName } } )
+
+expected["5"]++;
+expected["10"] = 5
+expected["6"] = 1
+
+assert.eq( tos( expected ) , tos( res.convertToSingleObject() ) , "C" );
+
+
+
diff --git a/jstests/mr_optim.js b/jstests/mr_optim.js
new file mode 100644
index 0000000..30ab602
--- /dev/null
+++ b/jstests/mr_optim.js
@@ -0,0 +1,47 @@
+
+
+t = db.mr_optim;
+t.drop();
+
+for (var i = 0; i < 1000; ++i) {
+ t.save( {a: Math.random(1000), b: Math.random(10000)} );
+}
+
+function m(){
+ emit(this._id, 13);
+}
+
+function r( key , values ){
+ return "bad";
+}
+
+function reformat( r ){
+ var x = {};
+ var cursor;
+ if ( r.results )
+ cursor = r.results;
+ else
+ cursor = r.find();
+ cursor.forEach(
+ function(z){
+ x[z._id] = z.value;
+ }
+ );
+ return x;
+}
+
+res = t.mapReduce( m , r , { out : "mr_optim_out" } );
+printjson( res )
+x = reformat( res );
+for (var key in x) {
+ assert.eq(x[key], 13, "value is not equal to original, maybe reduce has run");
+}
+res.drop();
+
+res = t.mapReduce( m , r , { out : { inline : 1 } } );
+//printjson( res )
+x2 = reformat( res );
+res.drop();
+
+assert.eq(x, x2, "object from inline and collection are not equal")
+
diff --git a/jstests/mr_outreduce.js b/jstests/mr_outreduce.js
new file mode 100644
index 0000000..87cba98
--- /dev/null
+++ b/jstests/mr_outreduce.js
@@ -0,0 +1,41 @@
+
+t = db.mr_outreduce;
+t.drop();
+
+t.insert( { _id : 1 , a : [ 1 , 2 ] } )
+t.insert( { _id : 2 , a : [ 2 , 3 ] } )
+t.insert( { _id : 3 , a : [ 3 , 4 ] } )
+
+outName = "mr_outreduce_out";
+out = db[outName];
+out.drop();
+
+m = function(){ for (i=0; i<this.a.length; i++ ) emit( this.a[i] , 1 ); }
+r = function(k,vs){ return Array.sum( vs ); }
+
+function tos( o ){
+ var s = "";
+ for ( var i=0; i<100; i++ ){
+ if ( o[i] )
+ s += i + "_" + o[i] + "|"
+ }
+ return s;
+}
+
+
+res = t.mapReduce( m , r , { out : outName } )
+
+
+expected = { "1" : 1 , "2" : 2 , "3" : 2 , "4" : 1 }
+assert.eq( tos( expected ) , tos( res.convertToSingleObject() ) , "A" );
+
+t.insert( { _id : 4 , a : [ 4 , 5 ] } )
+out.insert( { _id : 10 , value : "5" } ) // this is a sentinel to make sure it wasn't killed
+res = t.mapReduce( m , r , { out : { reduce : outName } , query : { _id : { $gt : 3 } } } )
+
+expected["4"]++;
+expected["5"] = 1
+expected["10"] = 5
+assert.eq( tos( expected ) , tos( res.convertToSingleObject() ) , "B" );
+
+
diff --git a/jstests/mr_outreduce2.js b/jstests/mr_outreduce2.js
new file mode 100644
index 0000000..fc27363
--- /dev/null
+++ b/jstests/mr_outreduce2.js
@@ -0,0 +1,27 @@
+
+normal = "mr_outreduce2"
+out = normal + "_out"
+
+t = db[normal]
+t.drop();
+
+db[out].drop()
+
+t.insert( { _id : 1 , x : 1 } )
+t.insert( { _id : 2 , x : 1 } )
+t.insert( { _id : 3 , x : 2 } )
+
+m = function(){ emit( this.x , 1 ); }
+r = function(k,v){ return Array.sum( v ); }
+
+res = t.mapReduce( m , r , { out : { reduce : out } , query : { _id : { $gt : 0 } } } )
+
+assert.eq( 2 , db[out].findOne( { _id : 1 } ).value , "A1" )
+assert.eq( 1 , db[out].findOne( { _id : 2 } ).value , "A2" )
+
+
+t.insert( { _id : 4 , x : 2 } )
+res = t.mapReduce( m , r , { out : { reduce : out } , query : { _id : { $gt : 3 } } , finalize : null } )
+
+assert.eq( 2 , db[out].findOne( { _id : 1 } ).value , "B1" )
+assert.eq( 2 , db[out].findOne( { _id : 2 } ).value , "B2" )
diff --git a/jstests/mr_replaceIntoDB.js b/jstests/mr_replaceIntoDB.js
new file mode 100644
index 0000000..217f407
--- /dev/null
+++ b/jstests/mr_replaceIntoDB.js
@@ -0,0 +1,45 @@
+
+t = db.mr_replace;
+t.drop();
+
+t.insert( { a : [ 1 , 2 ] } )
+t.insert( { a : [ 2 , 3 ] } )
+t.insert( { a : [ 3 , 4 ] } )
+
+outCollStr = "mr_replace_col";
+outDbStr = "mr_db";
+
+m = function(){ for (i=0; i<this.a.length; i++ ) emit( this.a[i] , 1 ); }
+r = function(k,vs){ return Array.sum( vs ); }
+
+function tos( o ){
+ var s = "";
+ for ( var i=0; i<100; i++ ){
+ if ( o[i] )
+ s += i + "_" + o[i];
+ }
+ return s;
+}
+
+print("Testing mr replace into other DB")
+res = t.mapReduce( m , r , { out : { replace: outCollStr, db: outDbStr } } )
+printjson( res );
+expected = { "1" : 1 , "2" : 2 , "3" : 2 , "4" : 1 };
+outDb = db.getMongo().getDB(outDbStr);
+outColl = outDb[outCollStr];
+str = tos( outColl.convertToSingleObject("value") )
+print("Received result: " + str);
+assert.eq( tos( expected ) , str , "A Received wrong result " + str );
+
+print("checking result field");
+assert.eq(res.result.collection, outCollStr, "B1 Wrong collection " + res.result.collection)
+assert.eq(res.result.db, outDbStr, "B2 Wrong db " + res.result.db)
+
+print("Replace again and check");
+outColl.save({_id: "5", value : 1});
+t.mapReduce( m , r , { out : { replace: outCollStr, db: outDbStr } } )
+str = tos( outColl.convertToSingleObject("value") )
+print("Received result: " + str);
+assert.eq( tos( expected ) , str , "C1 Received wrong result " + str );
+
+
diff --git a/jstests/mr_sort.js b/jstests/mr_sort.js
index 7692062..cc8db18 100644
--- a/jstests/mr_sort.js
+++ b/jstests/mr_sort.js
@@ -24,17 +24,17 @@ r = function( k , v ){
}
-res = t.mapReduce( m , r );
+res = t.mapReduce( m , r , "mr_sort_out " );
x = res.convertToSingleObject();
res.drop();
assert.eq( { "a" : 55 } , x , "A1" )
-res = t.mapReduce( m , r , { query : { x : { $lt : 3 } } } )
+res = t.mapReduce( m , r , { out : "mr_sort_out" , query : { x : { $lt : 3 } } } )
x = res.convertToSingleObject();
res.drop();
assert.eq( { "a" : 3 } , x , "A2" )
-res = t.mapReduce( m , r , { sort : { x : 1 } , limit : 2 } );
+res = t.mapReduce( m , r , { out : "mr_sort_out" , sort : { x : 1 } , limit : 2 } );
x = res.convertToSingleObject();
res.drop();
assert.eq( { "a" : 3 } , x , "A3" )
diff --git a/jstests/multiClient/rsDurKillRestart1.js b/jstests/multiClient/rsDurKillRestart1.js
new file mode 100644
index 0000000..03e658b
--- /dev/null
+++ b/jstests/multiClient/rsDurKillRestart1.js
@@ -0,0 +1,139 @@
+/* NOTE: This test requires mongo shell to be built with the V8 javascript engine so
+fork() is available */
+
+/*
+1. Starts up a replica set with 2 servers and 1 arbiter, all with --dur option.
+2. Loads 10000 1K docs into a collection
+3. Forks 5 client threads, each $pushes then $pulls its own id to/from the same array in all document (multi-update)
+4. A 6th thread kills a random server in the replica set every 0-60 secs then restarts it 0-30 secs later.
+-Tony */
+
+load('jstests/libs/fun.js')
+load('jstests/libs/concurrent.js')
+
+function random(n) {
+ return Math.floor(Math.random() * n)
+}
+
+function makeText(size) {
+ var text = ''
+ for (var i = 0; i < size; i++) text += 'a'
+ return text
+}
+
+function checkEqual (value, expected) {
+ if (value != expected) throw ('expected ' + expected + ' got ' + value)
+}
+
+function deploy() {
+ var rs = new ReplSetTest({nodes: 3, oplogSize: 1000})
+ rs.startSet({dur: null})
+ var cfg = rs.getReplSetConfig()
+ cfg.members[2]['arbiterOnly'] = true
+ rs.initiate(cfg)
+ rs.awaitReplication()
+ return rs
+}
+
+function confirmWrite(db) {
+ var cmd = {getlasterror: 1, fsync: true, w: 2}
+ var res = db.runCommand(cmd)
+ if (! res.ok) throw (tojson(cmd) + 'failed: ' + tojson(res))
+}
+
+N = 10000
+Text = makeText(1000)
+
+function loadInitialData(rs) {
+ var db = rs.getMaster().getDB('test')
+ for (var i = 0; i < N; i++) db['col'].insert({x: i, text: Text})
+ confirmWrite(db)
+}
+
+function newMasterConnection(ports) {
+ for (var i = 0; i < ports.length; i++) {
+ try {
+ print ('Try connect to '+ i)
+ var conn = new Mongo("127.0.0.1:" + ports[i])
+ var rec = conn.getDB('admin').runCommand({ismaster: 1})
+ if (rec && rec.ok && rec['ismaster']) {
+ print ('Connected ' + i)
+ return conn }
+ // else close conn
+ } catch(e) {}
+ }
+ throw 'no master: ' + ports
+}
+
+function rsMaster(ports, oldConn) {
+ try {
+ var rec = oldConn.getDB('admin').runCommand({ismaster: 1})
+ if (rec['ismaster']) return oldConn
+ } catch (e) {}
+ return newMasterConnection(ports)
+}
+
+function queryAndUpdateData(ports) {return function(z) {
+ var conn = null
+ return function(i) {
+ function printFailure(e) {print ('Q&U' + z + '-' + i + ': ' + e)}
+ try {
+ sleep(1000 + (z * 500))
+ print('update ' + z + ' round ' + i)
+ var db
+ try {
+ conn = rsMaster(ports, conn)
+ db = conn.getDB('test')
+ } catch (e) {
+ printFailure(e)
+ return
+ }
+ var n
+ try {
+ db['col'].update({}, {$push: {'z': z}}, false, true)
+ n = db['col'].count({'z': z})
+ } catch (e) {
+ printFailure(e)
+ return
+ }
+ checkEqual (n, N)
+ sleep(1000)
+ try {
+ db['col'].update({}, {$pull: {'z': z}}, false, true)
+ n = db['col'].count({'z': z})
+ } catch (e) {
+ printFailure(e)
+ return
+ }
+ checkEqual (n, 0)
+ } catch (e) {throw ('(Q&U' + z + '-' + i + ') ' + e)}
+ }
+}}
+
+function killer(rs) {return function(i) {
+ try {
+ sleep(random(30) * 1000)
+ var r = random(rs.ports.length - 1)
+ print('Killing ' + r)
+ stopMongod(rs.getPort(r), 9) // hard kill
+ sleep(random(30) * 1000)
+ print('Restarting ' + r)
+ rs.restart(r, {dur: null})
+ } catch (e) {throw ('(Killer-' + i + ') ' + e)}
+}}
+
+function rsPorts(rs) {
+ ports = new Array()
+ for (var i = 0; i < rs.ports.length; i++) ports[i] = rs.getPort(i)
+ return ports
+}
+
+function go(numRounds) {
+ var rs = deploy()
+ loadInitialData(rs)
+ var jobs = map(queryAndUpdateData(rsPorts(rs)), [1,2,3,4,5])
+ parallel (numRounds, jobs, [killer(rs)])
+ sleep (2000)
+ rs.stopSet()
+ print("rsDurKillRestart1.js SUCCESS")
+}
diff --git a/jstests/ne2.js b/jstests/ne2.js
new file mode 100644
index 0000000..89e99aa
--- /dev/null
+++ b/jstests/ne2.js
@@ -0,0 +1,21 @@
+// check that we don't scan $ne values
+
+t = db.jstests_ne2;
+t.drop();
+t.ensureIndex( {a:1} );
+
+t.save( { a:-0.5 } );
+t.save( { a:0 } );
+t.save( { a:0 } );
+t.save( { a:0.5 } );
+
+e = t.find( { a: { $ne: 0 } } ).explain( true );
+assert.eq( "BtreeCursor a_1 multi", e.cursor );
+assert.eq( 0, e.indexBounds.a[ 0 ][ 1 ] );
+assert.eq( 0, e.indexBounds.a[ 1 ][ 0 ] );
+assert.eq( 3, e.nscanned );
+
+e = t.find( { a: { $gt: -1, $lt: 1, $ne: 0 } } ).explain();
+assert.eq( "BtreeCursor a_1 multi", e.cursor );
+assert.eq( { a: [ [ -1, 0 ], [ 0, 1 ] ] }, e.indexBounds );
+assert.eq( 3, e.nscanned );
diff --git a/jstests/ne3.js b/jstests/ne3.js
new file mode 100644
index 0000000..489f47a
--- /dev/null
+++ b/jstests/ne3.js
@@ -0,0 +1,12 @@
+// don't allow most operators with regex
+
+t = db.jstests_ne3;
+t.drop();
+
+assert.throws( function() { t.findOne( { t: { $ne: /a/ } } ); } );
+assert.throws( function() { t.findOne( { t: { $gt: /a/ } } ); } );
+assert.throws( function() { t.findOne( { t: { $gte: /a/ } } ); } );
+assert.throws( function() { t.findOne( { t: { $lt: /a/ } } ); } );
+assert.throws( function() { t.findOne( { t: { $lte: /a/ } } ); } );
+
+assert.eq( 0, t.count( { t: { $in: [ /a/ ] } } ) ); \ No newline at end of file
diff --git a/jstests/not2.js b/jstests/not2.js
index dcd4535..b588ebd 100644
--- a/jstests/not2.js
+++ b/jstests/not2.js
@@ -97,7 +97,7 @@ indexed = function( query, min, max ) {
assert.eq( exp.indexBounds[ i ][0][0], min );
}
for( i in exp.indexBounds ) {
- assert.eq( exp.indexBounds[ i ][0][1], max );
+ assert.eq( exp.indexBounds[ i ][exp.indexBounds[ i ].length - 1][1], max );
}
}
@@ -109,7 +109,7 @@ not = function( query ) {
}
indexed( {i:1}, 1, 1 );
-not( {i:{$ne:1}} );
+indexed( {i:{$ne:1}}, {$minElement:1}, {$maxElement:1} );
indexed( {i:{$not:{$ne:"a"}}}, "a", "a" );
not( {i:{$not:/^a/}} );
@@ -138,5 +138,6 @@ not( {i:{$not:{$in:[1]}}} );
t.drop();
t.ensureIndex( {"i.j":1} );
indexed( {i:{$elemMatch:{j:1}}}, 1, 1 );
+//indexed( {i:{$not:{$elemMatch:{j:1}}}}, {$minElement:1}, {$maxElement:1} );
not( {i:{$not:{$elemMatch:{j:1}}}} );
indexed( {i:{$not:{$elemMatch:{j:{$ne:1}}}}}, 1, 1 );
diff --git a/jstests/notablescan.js b/jstests/notablescan.js
new file mode 100644
index 0000000..2e8cb0c
--- /dev/null
+++ b/jstests/notablescan.js
@@ -0,0 +1,22 @@
+// check notablescan mode
+
+t = db.test_notablescan;
+t.drop();
+
+try {
+ assert.commandWorked( db._adminCommand( { setParameter:1, notablescan:true } ) );
+ // commented lines are SERVER-2222
+// assert.throws( function() { t.find( {a:1} ).toArray(); } );
+ t.save( {a:1} );
+// assert.throws( function() { t.count( {a:1} ); } );
+// assert.throws( function() { t.find( {} ).toArray(); } );
+ assert.throws( function() { t.find( {a:1} ).toArray(); } );
+ assert.throws( function() { t.find( {a:1} ).hint( {$natural:1} ).toArray(); } );
+ t.ensureIndex( {a:1} );
+ assert.eq( 0, t.find( {a:1,b:1} ).itcount() );
+ assert.eq( 1, t.find( {a:1,b:null} ).itcount() );
+} finally {
+ // We assume notablescan was false before this test started and restore that
+ // expected value.
+ assert.commandWorked( db._adminCommand( { setParameter:1, notablescan:false } ) );
+}
diff --git a/jstests/objid5.js b/jstests/objid5.js
index 9a26839..f85ebc8 100644
--- a/jstests/objid5.js
+++ b/jstests/objid5.js
@@ -6,13 +6,14 @@ t.save( { _id : 5.5 } );
assert.eq( 18 , Object.bsonsize( t.findOne() ) , "A" );
x = db.runCommand( { features : 1 } )
-y = db.runCommand( { features : 1 , oidReset : 1 } )
-
-if( !x.ok )
+y = db.runCommand( { features : 1 , oidReset : 1 } )
+
+if( !x.ok )
print("x: " + tojson(x));
assert( x.oidMachine , "B1" )
assert.neq( x.oidMachine , y.oidMachine , "B2" )
assert.eq( x.oidMachine , y.oidMachineOld , "B3" )
-assert.eq( 18 , Object.bsonsize( { _id : 7.7 } ) , "C" )
+assert.eq( 18 , Object.bsonsize( { _id : 7.7 } ) , "C1" )
+assert.eq( 0 , Object.bsonsize( null ) , "C2" )
diff --git a/jstests/or4.js b/jstests/or4.js
index af8704b..f793f36 100644
--- a/jstests/or4.js
+++ b/jstests/or4.js
@@ -1,6 +1,13 @@
t = db.jstests_or4;
t.drop();
+// v8 does not have a builtin Array.sort
+if (!Array.sort) {
+ Array.sort = function(arr) {
+ return arr.sort();
+ };
+}
+
checkArrs = function( a, b ) {
m = "[" + a + "] != [" + b + "]";
a = eval( a );
@@ -66,10 +73,10 @@ assert.eq.automsg( "4", "t.find( {$or:[{a:2},{b:3}]} ).snapshot().toArray().leng
t.save( {a:1,b:3} );
assert.eq.automsg( "4", "t.find( {$or:[{a:2},{b:3}]} ).batchSize(-4).toArray().length" );
-assert.eq.automsg( "[1,2]", "t.distinct( 'a', {$or:[{a:2},{b:3}]} )" );
+assert.eq.automsg( "[1,2]", "Array.sort( t.distinct( 'a', {$or:[{a:2},{b:3}]} ) )" );
assert.eq.automsg( "[{a:2},{a:null},{a:1}]", "t.group( {key:{a:1}, cond:{$or:[{a:2},{b:3}]}, reduce:function( x, y ) { }, initial:{} } )" );
-assert.eq.automsg( "5", "t.mapReduce( function() { emit( 'a', this.a ); }, function( key, vals ) { return vals.length; }, {query:{$or:[{a:2},{b:3}]}} ).counts.input" );
+assert.eq.automsg( "5", "t.mapReduce( function() { emit( 'a', this.a ); }, function( key, vals ) { return vals.length; }, {out:{inline:true},query:{$or:[{a:2},{b:3}]}} ).counts.input" );
explain = t.find( {$or:[{a:2},{b:3}]} ).explain();
assert.eq.automsg( "2", "explain.clauses.length" );
diff --git a/jstests/or6.js b/jstests/or6.js
index 3800c78..097965b 100644
--- a/jstests/or6.js
+++ b/jstests/or6.js
@@ -7,8 +7,8 @@ assert.eq.automsg( "2", "t.find( {$or:[{a:{$gt:2}},{a:{$gt:0}}]} ).explain().cla
assert.eq.automsg( "2", "t.find( {$or:[{a:{$lt:2}},{a:{$lt:4}}]} ).explain().clauses[ 1 ].indexBounds.a[ 0 ][ 0 ]" );
assert.eq.automsg( "2", "t.find( {$or:[{a:{$gt:2,$lt:10}},{a:{$gt:0,$lt:5}}]} ).explain().clauses[ 1 ].indexBounds.a[ 0 ][ 1 ]" );
-assert.eq.automsg( "0", "t.find( {$or:[{a:{$gt:2,$lt:10}},{a:{$gt:0,$lt:15}}]} ).explain().clauses[ 1 ].indexBounds.a[ 0 ][ 0 ]" );
-assert.eq.automsg( "15", "t.find( {$or:[{a:{$gt:2,$lt:10}},{a:{$gt:0,$lt:15}}]} ).explain().clauses[ 1 ].indexBounds.a[ 0 ][ 1 ]" );
+
+assert.eq( [ [ 0, 2 ], [ 10, 15 ] ], t.find( {$or:[{a:{$gt:2,$lt:10}},{a:{$gt:0,$lt:15}}]} ).explain().clauses[ 1 ].indexBounds.a );
// no separate clauses
assert.eq.automsg( "null", "t.find( {$or:[{a:{$gt:2,$lt:10}},{a:{$gt:3,$lt:5}}]} ).explain().clauses" );
@@ -19,6 +19,8 @@ assert.eq.automsg( "null", "t.find( {$or:[{a:1},{b:2}]} ).hint( {a:1} ).explain(
assert.eq.automsg( "2", "t.find( {$or:[{a:1},{a:3}]} ).hint( {a:1} ).explain().clauses.length" );
assert.eq.automsg( "'BasicCursor'", "t.find( {$or:[{a:1},{a:3}]} ).hint( {$natural:1} ).explain().cursor" );
+assert.eq( null, t.find( {$or:[{a:{$gt:1,$lt:5},b:6}, {a:3,b:{$gt:0,$lt:10}}]} ).explain().clauses );
+
t.ensureIndex( {b:1} );
assert.eq.automsg( "2", "t.find( {$or:[{a:1,b:5},{a:3,b:5}]} ).hint( {a:1} ).explain().clauses.length" );
@@ -28,4 +30,10 @@ t.ensureIndex( {a:1,b:1} );
assert.eq.automsg( "2", "t.find( {$or:[{a:{$in:[1,2]},b:5}, {a:2,b:6}]} ).explain().clauses.length" );
assert.eq.automsg( "2", "t.find( {$or:[{a:{$gt:1,$lte:2},b:5}, {a:2,b:6}]} ).explain().clauses.length" );
assert.eq.automsg( "2", "t.find( {$or:[{a:{$gt:1,$lte:3},b:5}, {a:2,b:6}]} ).explain().clauses.length" );
-assert.eq.automsg( "null", "t.find( {$or:[{a:{$in:[1,2]}}, {a:2}]} ).explain().clauses" ); \ No newline at end of file
+assert.eq.automsg( "null", "t.find( {$or:[{a:{$in:[1,2]}}, {a:2}]} ).explain().clauses" );
+
+assert.eq( null, t.find( {$or:[{a:{$gt:1,$lt:5},b:{$gt:0,$lt:3},c:6}, {a:3,b:{$gt:1,$lt:2},c:{$gt:0,$lt:10}}]} ).explain().clauses );
+assert.eq( null, t.find( {$or:[{a:{$gt:1,$lt:5},c:6}, {a:3,b:{$gt:1,$lt:2},c:{$gt:0,$lt:10}}]} ).explain().clauses );
+exp = t.find( {$or:[{a:{$gt:1,$lt:5},b:{$gt:0,$lt:3},c:6}, {a:3,b:{$gt:1,$lt:4},c:{$gt:0,$lt:10}}]} ).explain();
+assert.eq( 3, exp.clauses[ 1 ].indexBounds.b[ 0 ][ 0 ] );
+assert.eq( 4, exp.clauses[ 1 ].indexBounds.b[ 0 ][ 1 ] );
diff --git a/jstests/orc.js b/jstests/orc.js
new file mode 100644
index 0000000..0ea7f19
--- /dev/null
+++ b/jstests/orc.js
@@ -0,0 +1,29 @@
+// test that or duplicates are dropped in certain special cases
+t = db.jstests_orc;
+t.drop();
+
+// The goal here will be to ensure the full range of valid values is scanned for each or clause, in order to ensure that
+// duplicates are eliminated properly in the cases below when field range elimination is not employed. The deduplication
+// of interest will occur on field a. The range specifications for fields b and c are such that (in the current
+// implementation) field range elimination will not occur between the or clauses, meaning that the full range of valid values
+// will be scanned for each clause and deduplication will be forced.
+
+// NOTE This test uses some tricks to avoid or range elimination, but in future implementations these tricks may not apply.
+// Perhaps it would be worthwhile to create a mode where range elimination is disabled so it will be possible to write a more
+// robust test.
+
+t.ensureIndex( {a:-1,b:1,c:1} );
+
+// sanity test
+t.save( {a:null,b:4,c:4} );
+assert.eq( 1, t.count( {$or:[{a:null,b:{$gte:0,$lte:5},c:{$gte:0,$lte:5}},{a:null,b:{$gte:3,$lte:8},c:{$gte:3,$lte:8}}]} ) );
+
+// from here on is SERVER-2245
+t.remove();
+t.save( {b:4,c:4} );
+assert.eq( 1, t.count( {$or:[{a:null,b:{$gte:0,$lte:5},c:{$gte:0,$lte:5}},{a:null,b:{$gte:3,$lte:8},c:{$gte:3,$lte:8}}]} ) );
+
+//t.remove();
+//t.save( {a:[],b:4,c:4} );
+//printjson( t.find( {$or:[{a:[],b:{$gte:0,$lte:5},c:{$gte:0,$lte:5}},{a:[],b:{$gte:3,$lte:8},c:{$gte:3,$lte:8}}]} ).explain() );
+//assert.eq( 1, t.count( {$or:[{a:[],b:{$gte:0,$lte:5},c:{$gte:0,$lte:5}},{a:[],b:{$gte:3,$lte:8},c:{$gte:3,$lte:8}}]} ) );
diff --git a/jstests/ord.js b/jstests/ord.js
new file mode 100644
index 0000000..4612f21
--- /dev/null
+++ b/jstests/ord.js
@@ -0,0 +1,34 @@
+// check that we don't crash if an index used by an earlier or clause is dropped
+
+// Dropping an index kills all cursors on the indexed namespace, not just those
+// cursors using the dropped index. This test is to serve as a reminder that
+// the $or implementation may need minor adjustments (memory ownership) if this
+// behavior is changed.
+
+t = db.jstests_ord;
+t.drop();
+
+t.ensureIndex( {a:1} );
+t.ensureIndex( {b:1} );
+
+for( i = 0; i < 80; ++i ) {
+ t.save( {a:1} );
+}
+
+for( i = 0; i < 100; ++i ) {
+ t.save( {b:1} );
+}
+
+c = t.find( { $or: [ {a:1}, {b:1} ] } ).batchSize( 100 );
+for( i = 0; i < 90; ++i ) {
+ c.next();
+}
+// At this point, our initial query has ended and there is a client cursor waiting
+// to read additional documents from index {b:1}. Deduping is performed against
+// the index key {a:1}
+
+t.dropIndex( {a:1} );
+
+// Dropping an index kills all cursors on the indexed namespace, not just those
+// cursors using the dropped index.
+assert.throws( c.next() );
diff --git a/jstests/ore.js b/jstests/ore.js
new file mode 100644
index 0000000..3c105c1
--- /dev/null
+++ b/jstests/ore.js
@@ -0,0 +1,13 @@
+// verify that index direction is considered when deduping based on an earlier
+// index
+
+t = db.jstests_ore;
+t.drop();
+
+t.ensureIndex( {a:-1} )
+t.ensureIndex( {b:1} );
+
+t.save( {a:1,b:1} );
+t.save( {a:2,b:1} );
+
+assert.eq( 2, t.count( {$or:[{a:{$in:[1,2]}},{b:1}]} ) ); \ No newline at end of file
diff --git a/jstests/orf.js b/jstests/orf.js
new file mode 100644
index 0000000..eb6be7a
--- /dev/null
+++ b/jstests/orf.js
@@ -0,0 +1,15 @@
+// Test a query with 200 $or clauses
+
+t = db.jstests_orf;
+t.drop();
+
+a = [];
+for( var i = 0; i < 200; ++i ) {
+ a.push( {_id:i} );
+}
+a.forEach( function( x ) { t.save( x ); } );
+
+explain = t.find( {$or:a} ).explain();
+assert.eq( 200, explain.n );
+assert.eq( 200, explain.clauses.length );
+assert.eq( 200, t.count( {$or:a} ) );
diff --git a/jstests/parallel/del.js b/jstests/parallel/del.js
new file mode 100644
index 0000000..c6eb500
--- /dev/null
+++ b/jstests/parallel/del.js
@@ -0,0 +1,79 @@
+
+
+N = 1000;
+
+HOST = db.getMongo().host
+
+DONE = false;
+
+function del1( dbname ){
+ var m = new Mongo( HOST )
+ var db = m.getDB( "foo" + dbname );
+ var t = db.del
+
+ while ( ! DONE ){
+ var r = Math.random();
+ var n = Math.floor( Math.random() * N );
+ if ( r < .9 ){
+ t.insert( { x : n } )
+ }
+ else if ( r < .98 ){
+ t.remove( { x : n } );
+ }
+ else if ( r < .99 ){
+ t.remove( { x : { $lt : n } } )
+ }
+ else {
+ t.remove( { x : { $gt : n } } );
+ }
+ if ( r > .9999 )
+ print( t.count() )
+ }
+}
+
+function del2( dbname ){
+ var m = new Mongo( HOST )
+ var db = m.getDB( "foo" + dbname );
+ var t = db.del
+
+ while ( ! DONE ){
+ var r = Math.random();
+ var n = Math.floor( Math.random() * N );
+ var s = Math.random() > .5 ? 1 : -1;
+
+ if ( r < .5 ){
+ t.findOne( { x : n } )
+ }
+ else if ( r < .75 ){
+ t.find( { x : { $lt : n } } ).sort( { x : s } ).itcount();
+ }
+ else {
+ t.find( { x : { $gt : n } } ).sort( { x : s } ).itcount();
+ }
+ }
+}
+
+all = []
+
+all.push( fork( del1 , "a" ) )
+all.push( fork( del2 , "a" ) )
+all.push( fork( del1 , "b" ) )
+all.push( fork( del2 , "b" ) )
+
+for ( i=0; i<all.length; i++ )
+ all[i].start()
+
+a = db.getSisterDB( "fooa" )
+b = db.getSisterDB( "foob" )
+
+for ( i=0; i<10; i++ ){
+ sleep( 2000 )
+ print( "dropping" )
+ a.dropDatabase();
+ b.dropDatabase();
+}
+
+DONE = true;
+
+all[0].join()
+
diff --git a/jstests/parallel/repl.js b/jstests/parallel/repl.js
index cb9b770..919b0d7 100644
--- a/jstests/parallel/repl.js
+++ b/jstests/parallel/repl.js
@@ -1,4 +1,4 @@
-// test all operations in parallel
+// test basic operations in parallel, with replication
baseName = "parallel_repl"
@@ -25,7 +25,7 @@ for( id = 0; id < 10; ++id ) {
g.addRemove( { _id:Random.randInt( 1000 ) } );
break;
case 2: // update
- g.addUpdate( {_id:{$lt:1000}}, {a:{$inc:5}} );
+ g.addUpdate( {_id:{$lt:1000}}, {$inc:{a:5}} );
break;
default:
assert( false, "Invalid op code" );
diff --git a/jstests/perf/geo_near1.js b/jstests/perf/geo_near1.js
new file mode 100644
index 0000000..c999483
--- /dev/null
+++ b/jstests/perf/geo_near1.js
@@ -0,0 +1,11 @@
+var t = db.bench.geo_near1;
+t.drop()
+
+var numPts = 1000*1000;
+
+
+for (var i=0; i < numPts; i++){
+ x = (Math.random() * 100) - 50;
+ y = (Math.random() * 100) - 50;
+ t.insert({loc: [x,y], i: i});
+}
diff --git a/jstests/profile1.js b/jstests/profile1.js
index 49f6838..0e8009a 100644
--- a/jstests/profile1.js
+++ b/jstests/profile1.js
@@ -1,4 +1,6 @@
+try {
+
/* With pre-created system.profile (capped) */
db.runCommand({profile: 0});
db.getCollection("system.profile").drop();
@@ -40,3 +42,8 @@ assert.eq(2, db.runCommand({profile: -1}).was, "I");
assert.eq(1, db.system.profile.stats().capped, "J");
var auto_size = db.system.profile.storageSize();
assert.gt(auto_size, capped_size, "K");
+
+} finally {
+ // disable profiling for subsequent tests
+ assert.commandWorked( db.runCommand( {profile:0} ) );
+} \ No newline at end of file
diff --git a/jstests/proj_key1.js b/jstests/proj_key1.js
new file mode 100644
index 0000000..ad944f7
--- /dev/null
+++ b/jstests/proj_key1.js
@@ -0,0 +1,28 @@
+
+t = db.proj_key1;
+t.drop();
+
+as = []
+
+for ( i=0; i<10; i++ ){
+ as.push( { a : i } )
+ t.insert( { a : i , b : i } );
+}
+
+assert( ! t.find( {} , { a : 1 } ).explain().indexOnly , "A1" )
+
+t.ensureIndex( { a : 1 } )
+
+assert( t.find( { a : { $gte : 0 } } , { a : 1 , _id : 0 } ).explain().indexOnly , "A2" )
+
+assert( ! t.find( { a : { $gte : 0 } } , { a : 1 } ).explain().indexOnly , "A3" ) // because of _id
+
+// assert( t.find( {} , { a : 1 , _id : 0 } ).explain().indexOnly , "A4" ); // TODO: need to modify query optimizer SERVER-2109
+
+assert.eq( as , t.find( { a : { $gte : 0 } } , { a : 1 , _id : 0 } ).toArray() , "B1" )
+assert.eq( as , t.find( { a : { $gte : 0 } } , { a : 1 , _id : 0 } ).batchSize(2).toArray() , "B1" )
+
+
+
+
+
diff --git a/jstests/pull_remove1.js b/jstests/pull_remove1.js
new file mode 100644
index 0000000..379f3f2
--- /dev/null
+++ b/jstests/pull_remove1.js
@@ -0,0 +1,14 @@
+
+t = db.pull_remove1
+t.drop()
+
+o = { _id : 1 , a : [ 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 ] }
+t.insert( o )
+
+assert.eq( o , t.findOne() , "A1" )
+
+o.a = o.a.filter( function(z){ return z >= 6; } )
+t.update( {} , { $pull : { a : { $lt : 6 } } } )
+
+assert.eq( o.a , t.findOne().a , "A2" )
+
diff --git a/jstests/push2.js b/jstests/push2.js
index 943ec11..b976169 100644
--- a/jstests/push2.js
+++ b/jstests/push2.js
@@ -18,3 +18,5 @@ for ( x=0; x<200; x++ ){
}
assert( gotError , "should have gotten error" );
+
+t.drop();
diff --git a/jstests/queryoptimizer2.js b/jstests/queryoptimizer2.js
new file mode 100644
index 0000000..af21e95
--- /dev/null
+++ b/jstests/queryoptimizer2.js
@@ -0,0 +1,62 @@
+
+t = db.queryoptimizer2;
+
+function doTest( f1, f2 ) {
+
+t.drop()
+
+for( i = 0; i < 30; ++i ) {
+ t.save( { a:2 } );
+}
+
+for( i = 0; i < 30; ++i ) {
+ t.save( { b:2 } );
+}
+
+for( i = 0; i < 60; ++i ) {
+ t.save( { c:2 } );
+}
+
+t.ensureIndex( { a:1 } );
+t.ensureIndex( { b:1 } );
+
+e = t.find( { b:2 } ).batchSize( 100 ).explain( true );
+assert.eq( null, e.oldPlan );
+
+t.ensureIndex( { c:1 } ); // will clear query cache
+
+f1();
+
+assert( t.find( { a:2 } ).batchSize( 100 ).explain( true ).oldPlan );
+assert( t.find( { b:2 } ).batchSize( 100 ).explain( true ).oldPlan );
+
+e = t.find( { c:2 } ).batchSize( 100 ).explain( true );
+// no pattern should be recorded as a result of the $or query
+assert.eq( null, e.oldPlan );
+
+t.dropIndex( { b:1 } ); // clear query cache
+for( i = 0; i < 15; ++i ) {
+ t.save( { a:2 } );
+}
+
+f2();
+// pattern should be recorded, since > half of results returned from this index
+assert( t.find( { c:2 } ).batchSize( 100 ).explain( true ).oldPlan );
+
+}
+
+doTest( function() {
+ t.find( { $or: [ { a:2 }, { b:2 }, { c:2 } ] } ).batchSize( 100 ).toArray();
+ },
+ function() {
+ t.find( { $or: [ { a:2 }, { c:2 } ] } ).batchSize( 100 ).toArray();
+ }
+ );
+
+doTest( function() {
+ t.find( { $or: [ { a:2 }, { b:2 }, { c:2 } ] } ).limit( 100 ).count( true );
+ },
+ function() {
+ t.find( { $or: [ { a:2 }, { c:2 } ] } ).limit( 100 ).count( true );
+ }
+ );
diff --git a/jstests/regex3.js b/jstests/regex3.js
index ee8d9cf..7d703aa 100644
--- a/jstests/regex3.js
+++ b/jstests/regex3.js
@@ -23,7 +23,7 @@ t.save( { name : "c" } );
assert.eq( 3 , t.find( { name : /^aa*/ } ).count() , "B ni" );
t.ensureIndex( { name : 1 } );
assert.eq( 3 , t.find( { name : /^aa*/ } ).count() , "B i 1" );
-assert.eq( 3 , t.find( { name : /^aa*/ } ).explain().nscanned , "B i 1 e" );
+assert.eq( 4 , t.find( { name : /^aa*/ } ).explain().nscanned , "B i 1 e" );
assert.eq( 2 , t.find( { name : /^a[ab]/ } ).count() , "B i 2" );
assert.eq( 2 , t.find( { name : /^a[bc]/ } ).count() , "B i 3" );
diff --git a/jstests/regex6.js b/jstests/regex6.js
index 12ed85b..8243313 100644
--- a/jstests/regex6.js
+++ b/jstests/regex6.js
@@ -10,10 +10,10 @@ t.save( { name : "aaron" } );
t.ensureIndex( { name : 1 } );
assert.eq( 0 , t.find( { name : /^\// } ).count() , "index count" );
-assert.eq( 0 , t.find( { name : /^\// } ).explain().nscanned , "index explain 1" );
+assert.eq( 1 , t.find( { name : /^\// } ).explain().nscanned , "index explain 1" );
assert.eq( 0 , t.find( { name : /^é/ } ).explain().nscanned , "index explain 2" );
assert.eq( 0 , t.find( { name : /^\é/ } ).explain().nscanned , "index explain 3" );
-assert.eq( 0 , t.find( { name : /^\./ } ).explain().nscanned , "index explain 4" );
+assert.eq( 1 , t.find( { name : /^\./ } ).explain().nscanned , "index explain 4" );
assert.eq( 4 , t.find( { name : /^./ } ).explain().nscanned , "index explain 5" );
assert.eq( 4 , t.find( { name : /^\Qblah\E/ } ).explain().nscanned , "index explain 6" );
diff --git a/jstests/regex9.js b/jstests/regex9.js
index 559efd9..896855c 100644
--- a/jstests/regex9.js
+++ b/jstests/regex9.js
@@ -1,5 +1,5 @@
-t = db.regex3;
+t = db.regex9;
t.drop();
t.insert( { _id : 1 , a : [ "a" , "b" , "c" ] } )
diff --git a/jstests/remove_undefined.js b/jstests/remove_undefined.js
new file mode 100644
index 0000000..d5344a3
--- /dev/null
+++ b/jstests/remove_undefined.js
@@ -0,0 +1,28 @@
+
+t = db.drop_undefined.js
+
+t.insert( { _id : 1 } )
+t.insert( { _id : 2 } )
+t.insert( { _id : null } )
+
+z = { foo : 1 , x : null }
+
+t.remove( { x : z.bar } )
+assert.eq( 3 , t.count() , "A1" )
+
+t.remove( { x : undefined } )
+assert.eq( 3 , t.count() , "A2" )
+
+assert.throws( function(){ t.remove( { _id : z.bar } ) } , null , "B1" )
+assert.throws( function(){ t.remove( { _id : undefined } ) } , null , "B2" )
+
+
+t.remove( { _id : z.x } )
+assert.eq( 2 , t.count() , "C1" )
+
+t.insert( { _id : null } )
+assert.eq( 3 , t.count() , "C2" )
+
+assert.throws( function(){ t.remove( { _id : undefined } ) } , null, "C3" )
+assert.eq( 3 , t.count() , "C4" )
+
diff --git a/jstests/rename4.js b/jstests/rename4.js
new file mode 100644
index 0000000..29be374
--- /dev/null
+++ b/jstests/rename4.js
@@ -0,0 +1,121 @@
+t = db.jstests_rename4;
+t.drop();
+
+function c( f ) {
+ assert( !db.getLastError(), "error" );
+ eval( f );
+ assert( db.getLastError(), "no error" );
+ db.resetError();
+}
+
+c( "t.update( {}, {$rename:{'a':'a'}} )" );
+c( "t.update( {}, {$rename:{'':'a'}} )" );
+c( "t.update( {}, {$rename:{'a':''}} )" );
+c( "t.update( {}, {$rename:{'_id':'a'}} )" );
+c( "t.update( {}, {$rename:{'a':'_id'}} )" );
+c( "t.update( {}, {$rename:{'_id.a':'b'}} )" );
+c( "t.update( {}, {$rename:{'b':'_id.a'}} )" );
+c( "t.update( {}, {$rename:{'_id.a':'_id.b'}} )" );
+c( "t.update( {}, {$rename:{'_id.b':'_id.a'}} )" );
+c( "t.update( {}, {$rename:{'.a':'b'}} )" );
+c( "t.update( {}, {$rename:{'a':'.b'}} )" );
+c( "t.update( {}, {$rename:{'a.':'b'}} )" );
+c( "t.update( {}, {$rename:{'a':'b.'}} )" );
+c( "t.update( {}, {$rename:{'a.b':'a'}} )" );
+c( "t.update( {}, {$rename:{'a.$':'b'}} )" );
+c( "t.update( {}, {$rename:{'a':'b.$'}} )" );
+c( "t.update( {}, {$set:{b:1},$rename:{'a':'b'}} )" );
+c( "t.update( {}, {$rename:{'a':'b'},$set:{b:1}} )" );
+c( "t.update( {}, {$rename:{'a':'b'},$set:{a:1}} )" );
+c( "t.update( {}, {$set:{'b.c':1},$rename:{'a':'b'}} )" );
+c( "t.update( {}, {$set:{b:1},$rename:{'a':'b.c'}} )" );
+c( "t.update( {}, {$rename:{'a':'b'},$set:{'b.c':1}} )" );
+c( "t.update( {}, {$rename:{'a':'b.c'},$set:{b:1}} )" );
+
+t.save( {a:[1],b:{c:[1]},d:[{e:1}],f:1} );
+c( "t.update( {}, {$rename:{'a.0':'f'}} )" );
+c( "t.update( {}, {$rename:{'a.0':'g'}} )" );
+c( "t.update( {}, {$rename:{'f':'a.0'}} )" );
+c( "t.update( {}, {$rename:{'b.c.0':'f'}} )" );
+c( "t.update( {}, {$rename:{'f':'b.c.0'}} )" );
+c( "t.update( {}, {$rename:{'d.e':'d.f'}} )" );
+c( "t.update( {}, {$rename:{'d.e':'f'}} )" );
+c( "t.update( {}, {$rename:{'d.f':'d.e'}} )" );
+c( "t.update( {}, {$rename:{'f':'d.e'}} )" );
+c( "t.update( {}, {$rename:{'d.0.e':'d.f'}} )" );
+c( "t.update( {}, {$rename:{'d.0.e':'f'}} )" );
+c( "t.update( {}, {$rename:{'d.f':'d.0.e'}} )" );
+c( "t.update( {}, {$rename:{'f':'d.0.e'}} )" );
+c( "t.update( {}, {$rename:{'f.g':'a'}} )" );
+c( "t.update( {}, {$rename:{'a':'f.g'}} )" );
+
+function v( start, mod, expected ) {
+ t.remove();
+ t.save( start );
+ t.update( {}, mod );
+ assert( !db.getLastError() );
+ var got = t.findOne();
+ delete got._id;
+ assert.eq( expected, got );
+}
+
+v( {a:1}, {$rename:{a:'b'}}, {b:1} );
+v( {a:1}, {$rename:{a:'bb'}}, {bb:1} );
+v( {b:1}, {$rename:{b:'a'}}, {a:1} );
+v( {bb:1}, {$rename:{bb:'a'}}, {a:1} );
+v( {a:{y:1}}, {$rename:{'a.y':'a.z'}}, {a:{z:1}} );
+v( {a:{yy:1}}, {$rename:{'a.yy':'a.z'}}, {a:{z:1}} );
+v( {a:{z:1}}, {$rename:{'a.z':'a.y'}}, {a:{y:1}} );
+v( {a:{zz:1}}, {$rename:{'a.zz':'a.y'}}, {a:{y:1}} );
+v( {a:{c:1}}, {$rename:{a:'b'}}, {b:{c:1}} );
+v( {aa:{c:1}}, {$rename:{aa:'b'}}, {b:{c:1}} );
+v( {a:1,b:2}, {$rename:{a:'b'}}, {b:1} );
+v( {aa:1,b:2}, {$rename:{aa:'b'}}, {b:1} );
+v( {a:1,bb:2}, {$rename:{a:'bb'}}, {bb:1} );
+v( {a:1}, {$rename:{a:'b.c'}}, {b:{c:1}} );
+v( {aa:1}, {$rename:{aa:'b.c'}}, {b:{c:1}} );
+v( {a:1,b:{}}, {$rename:{a:'b.c'}}, {b:{c:1}} );
+v( {aa:1,b:{}}, {$rename:{aa:'b.c'}}, {b:{c:1}} );
+v( {a:1}, {$rename:{b:'c'}}, {a:1} );
+v( {aa:1}, {$rename:{b:'c'}}, {aa:1} );
+v( {}, {$rename:{b:'c'}}, {} );
+v( {a:{b:1,c:2}}, {$rename:{'a.b':'d'}}, {a:{c:2},d:1} );
+v( {a:{bb:1,c:2}}, {$rename:{'a.bb':'d'}}, {a:{c:2},d:1} );
+v( {a:{b:1}}, {$rename:{'a.b':'d'}}, {a:{},d:1} );
+v( {a:[5]}, {$rename:{a:'b'}}, {b:[5]} );
+v( {aa:[5]}, {$rename:{aa:'b'}}, {b:[5]} );
+v( {'0':1}, {$rename:{'0':'5'}}, {'5':1} );
+v( {a:1,b:2}, {$rename:{a:'c'},$set:{b:5}}, {b:5,c:1} );
+v( {aa:1,b:2}, {$rename:{aa:'c'},$set:{b:5}}, {b:5,c:1} );
+v( {a:1,b:2}, {$rename:{z:'c'},$set:{b:5}}, {a:1,b:5} );
+v( {aa:1,b:2}, {$rename:{z:'c'},$set:{b:5}}, {aa:1,b:5} );
+
+// (formerly) rewriting single field
+v( {a:{z:1,b:1}}, {$rename:{'a.b':'a.c'}}, {a:{c:1,z:1}} );
+v( {a:{z:1,tomato:1}}, {$rename:{'a.tomato':'a.potato'}}, {a:{potato:1,z:1}} );
+v( {a:{z:1,b:1,c:1}}, {$rename:{'a.b':'a.c'}}, {a:{c:1,z:1}} );
+v( {a:{z:1,tomato:1,potato:1}}, {$rename:{'a.tomato':'a.potato'}}, {a:{potato:1,z:1}} );
+v( {a:{z:1,b:1}}, {$rename:{'a.b':'a.cc'}}, {a:{cc:1,z:1}} );
+v( {a:{z:1,b:1,c:1}}, {$rename:{'a.b':'aa.c'}}, {a:{c:1,z:1},aa:{c:1}} );
+
+// invalid target, but missing source
+v( {a:1,c:4}, {$rename:{b:'c.d'}}, {a:1,c:4} );
+
+// check index
+t.drop();
+t.ensureIndex( {a:1} );
+
+function l( start, mod, query, expected ) {
+ t.remove();
+ t.save( start );
+ t.update( {}, mod );
+ assert( !db.getLastError() );
+ var got = t.find( query ).hint( {a:1} ).next();
+ delete got._id;
+ assert.eq( expected, got );
+}
+
+l( {a:1}, {$rename:{a:'b'}}, {a:null}, {b:1} );
+l( {a:1}, {$rename:{a:'bb'}}, {a:null}, {bb:1} );
+l( {b:1}, {$rename:{b:'a'}}, {a:1}, {a:1} );
+l( {bb:1}, {$rename:{bb:'a'}}, {a:1}, {a:1} );
diff --git a/jstests/repl/basic1.js b/jstests/repl/basic1.js
index 701d71e..15fc983 100644
--- a/jstests/repl/basic1.js
+++ b/jstests/repl/basic1.js
@@ -60,7 +60,7 @@ r = function( key , v ){
correct = { a : 2 , b : 1 };
function checkMR( t ){
- var res = t.mapReduce( m , r );
+ var res = t.mapReduce( m , r , "basic1_out" );
assert.eq( correct , res.convertToSingleObject() , "checkMR: " + tojson( t ) );
}
@@ -68,7 +68,7 @@ function checkNumCollections( msg , diff ){
if ( ! diff ) diff = 0;
var m = am.getCollectionNames();
var s = as.getCollectionNames();
- assert.eq( m.length + diff , s.length , "lengths bad \n" + tojson( m ) + "\n" + tojson( s ) );
+ assert.eq( m.length + diff , s.length , msg + " lengths bad \n" + tojson( m ) + "\n" + tojson( s ) );
}
checkNumCollections( "MR1" );
diff --git a/jstests/repl/block2.js b/jstests/repl/block2.js
index 0e34758..f38a4e3 100644
--- a/jstests/repl/block2.js
+++ b/jstests/repl/block2.js
@@ -18,25 +18,26 @@ function check( msg ){
assert.eq( tm.count() , ts.count() , "check: " + msg );
}
+function worked( w , wtimeout ){
+ return dbm.getLastError( w , wtimeout ) == null;
+}
+
check( "A" );
tm.save( { x : 1 } );
-dbm.getLastError( 2 );
-check( "B" );
+assert( worked( 2 ) , "B" );
tm.save( { x : 2 } );
-dbm.getLastError( 2 , 500 );
-check( "C" );
+assert( worked( 2 , 500 ) , "C" )
rt.stop( false );
tm.save( { x : 3 } )
assert.eq( 3 , tm.count() , "D1" );
-assert.throws( function(){ dbm.getLastError( 2 , 500 ); } , "D2" )
+assert( ! worked( 2 , 500 ) , "D2" )
s = rt.start( false )
setup();
-dbm.getLastError( 2 , 30000 )
-check( "D3" )
+assert( worked( 2 , 30000 ) , "E" )
rt.stop();
diff --git a/jstests/repl/mastermaster1.js b/jstests/repl/mastermaster1.js
index 9f9334b..4932d5a 100644
--- a/jstests/repl/mastermaster1.js
+++ b/jstests/repl/mastermaster1.js
@@ -6,6 +6,8 @@ ports = allocatePorts( 2 )
left = startMongodTest( ports[0] , "mastermaster1left" , false , { master : "" , slave : "" , source : "127.0.0.1:" + ports[1] } )
right = startMongodTest( ports[1] , "mastermaster1left" , false , { master : "" , slave : "" , source : "127.0.0.1:" + ports[0] } )
+print( "check 1" )
+
x = left.getDB( "admin" ).runCommand( "ismaster" )
assert( x.ismaster , "left: " + tojson( x ) )
@@ -15,6 +17,8 @@ assert( x.ismaster , "right: " + tojson( x ) )
ldb = left.getDB( "test" )
rdb = right.getDB( "test" )
+print( "check 2" )
+
ldb.foo.insert( { _id : 1 , x : "eliot" } )
var result = ldb.runCommand( { getlasterror : 1 , w : 2 , wtimeout : 20000 } );
printjson(result);
@@ -27,12 +31,12 @@ print( "check 3" )
assert.eq( 2 , ldb.foo.count() , "B1" )
assert.eq( 2 , rdb.foo.count() , "B2" )
-
+print( "going to stop everything" )
for ( var i=0; i<ports.length; i++ ){
stopMongod( ports[i] );
}
-
+print( "yay" )
diff --git a/jstests/repl/pair1.js b/jstests/repl/pair1.js
index b8b7ffd..84dd7b7 100644
--- a/jstests/repl/pair1.js
+++ b/jstests/repl/pair1.js
@@ -40,8 +40,8 @@ check = function( s ) {
// check that slave reads and writes are guarded
checkSlaveGuard = function( s ) {
var t = s.getDB( baseName + "-temp" ).temp;
- assert.throws( t.find().count, {}, "not master" );
- assert.throws( t.find(), {}, "not master", "find did not assert" );
+ assert.throws( t.find().count, [], "not master" );
+ assert.throws( t.find(), [], "not master", "find did not assert" );
checkError = function() {
assert.eq( "not master", s.getDB( "admin" ).getLastError() );
diff --git a/jstests/repl/repl1.js b/jstests/repl/repl1.js
index 60f3942..9f46f7a 100644
--- a/jstests/repl/repl1.js
+++ b/jstests/repl/repl1.js
@@ -48,6 +48,8 @@ doTest = function( signal ) {
assert.soon( function() { return as.find().count() == 1020; } );
assert.eq( 1, as.find( { i: 1019 } ).count() );
+ assert.automsg( "m.getDB( 'local' ).getCollection( 'oplog.$main' ).stats().size > 0" );
+
rt.stop();
}
diff --git a/jstests/repl/repl11.js b/jstests/repl/repl11.js
index c5c63b3..aef9872 100644
--- a/jstests/repl/repl11.js
+++ b/jstests/repl/repl11.js
@@ -35,6 +35,10 @@ doTest = function( signal ) {
sa = s.getDB( baseName ).a;
assert.soon( function() { return 1 == sa.count(); } );
+ s.getDB( "local" ).auth( "repl", "foo" );
+ assert.commandWorked( s.getDB( "admin" )._adminCommand( {serverStatus:1,repl:1} ) );
+ assert.commandWorked( s.getDB( "admin" )._adminCommand( {serverStatus:1,repl:2} ) );
+
rt.stop( false, signal );
ma.save( {} );
diff --git a/jstests/repl/repl2.js b/jstests/repl/repl2.js
index c9fe6b9..42b0caf 100644
--- a/jstests/repl/repl2.js
+++ b/jstests/repl/repl2.js
@@ -31,6 +31,8 @@ doTest = function( signal ) {
assert.soon( function() { return 1 == s.getDB( "admin" ).runCommand( { "resync" : 1 } ).ok; } );
soonCount( 1001 );
+ assert.automsg( "m.getDB( 'local' ).getCollection( 'oplog.$main' ).stats().size > 0" );
+
as = s.getDB("foo").a
assert.eq( 1, as.find( { i: 0 } ).count() );
assert.eq( 1, as.find( { i: 999 } ).count() );
diff --git a/jstests/repl/snapshot3.js b/jstests/repl/snapshot3.js
index d8d268d..02955e5 100644
--- a/jstests/repl/snapshot3.js
+++ b/jstests/repl/snapshot3.js
@@ -47,7 +47,7 @@ assert.eq( 500, rp.slave().getDB( baseName )[ baseName ].count() );
rp.master().getDB( baseName )[ baseName ].save( {i:500} );
assert.soon( function() { return 501 == rp.slave().getDB( baseName )[ baseName ].count(); } );
-assert( !rawMongoProgramOutput().match( /resync/ ) );
-assert( !rawMongoProgramOutput().match( /SyncException/ ) );
+assert( !rawMongoProgramOutput().match( new RegExp( "resync.*" + baseName + ".*\n" ) ) , "last1" );
+assert( !rawMongoProgramOutput().match( /SyncException/ ) , "last2" );
print("snapshot3.js finishes");
diff --git a/jstests/replsets/auth1.js b/jstests/replsets/auth1.js
new file mode 100644
index 0000000..4945869
--- /dev/null
+++ b/jstests/replsets/auth1.js
@@ -0,0 +1,184 @@
+// check replica set authentication
+
+load("jstests/replsets/rslib.js");
+
+var name = "rs_auth1";
+var port = allocatePorts(4);
+var path = "jstests/replsets/";
+
+
+print("reset permissions");
+run("chmod", "644", path+"key1");
+run("chmod", "644", path+"key2");
+
+
+print("try starting mongod");
+var m = runMongoProgram( "mongod", "--keyFile", path+"key1", "--port", port[0], "--dbpath", "/data/db/" + name);
+
+
+print("should fail with wrong permissions");
+assert.eq(m, 2, "mongod should exit w/ 2: permissions too open");
+stopMongod(port[0]);
+
+
+print("change permissions on #1 & #2");
+run("chmod", "600", path+"key1");
+run("chmod", "600", path+"key2");
+
+
+print("add a user to server0: foo");
+m = startMongodTest( port[0], name+"-0", 0 );
+m.getDB("admin").addUser("foo", "bar");
+m.getDB("test").addUser("bar", "baz");
+print("make sure user is written before shutting down");
+m.getDB("test").getLastError();
+stopMongod(port[0]);
+
+
+print("start up rs");
+var rs = new ReplSetTest({"name" : name, "nodes" : 3, "startPort" : port[0]});
+m = rs.restart(0, {"keyFile" : path+"key1"});
+var s = rs.start(1, {"keyFile" : path+"key1"});
+var s2 = rs.start(2, {"keyFile" : path+"key1"});
+
+var result = m.getDB("admin").auth("foo", "bar");
+assert.eq(result, 1, "login failed");
+result = m.getDB("admin").runCommand({replSetInitiate : rs.getReplSetConfig()});
+assert.eq(result.ok, 1, "couldn't initiate: "+tojson(result));
+
+var master = rs.getMaster().getDB("test");
+wait(function() {
+ var status = master.adminCommand({replSetGetStatus:1});
+ return status.members && status.members[1].state == 2 && status.members[2].state == 2;
+ });
+
+master.foo.insert({x:1});
+master.runCommand({getlasterror:1, w:3, wtimeout:60000});
+
+
+print("try some legal and illegal reads");
+var r = master.foo.findOne();
+assert.eq(r.x, 1);
+
+s.setSlaveOk();
+slave = s.getDB("test");
+
+function doQueryOn(p) {
+ var err = {};
+ try {
+ r = p.foo.findOne();
+ }
+ catch(e) {
+ if (typeof(JSON) != "undefined") {
+ err = JSON.parse(e.substring(6));
+ }
+ else if (e.indexOf("10057") > 0) {
+ err.code = 10057;
+ }
+ }
+ assert.eq(err.code, 10057);
+};
+
+doQueryOn(slave);
+master.adminCommand({logout:1});
+doQueryOn(master);
+
+
+result = slave.auth("bar", "baz");
+assert.eq(result, 1);
+
+r = slave.foo.findOne();
+assert.eq(r.x, 1);
+
+
+print("add some data");
+master.auth("bar", "baz");
+for (var i=0; i<1000; i++) {
+ master.foo.insert({x:i, foo : "bar"});
+}
+master.runCommand({getlasterror:1, w:3, wtimeout:60000});
+
+
+print("fail over");
+rs.stop(0);
+
+wait(function() {
+ function getMaster(s) {
+ var result = s.getDB("admin").runCommand({isMaster: 1});
+ printjson(result);
+ if (result.ismaster) {
+ master = s.getDB("test");
+ return true;
+ }
+ return false;
+ }
+
+ if (getMaster(s) || getMaster(s2)) {
+ return true;
+ }
+ return false;
+ });
+
+
+print("add some more data 1");
+master.auth("bar", "baz");
+for (var i=0; i<1000; i++) {
+ master.foo.insert({x:i, foo : "bar"});
+}
+master.runCommand({getlasterror:1, w:3, wtimeout:60000});
+
+
+print("resync");
+rs.restart(0);
+
+
+print("add some more data 2");
+for (var i=0; i<1000; i++) {
+ master.foo.insert({x:i, foo : "bar"});
+}
+master.runCommand({getlasterror:1, w:3, wtimeout:60000});
+
+
+print("add member with wrong key");
+var conn = new MongodRunner(port[3], "/data/db/"+name+"-3", null, null, ["--replSet","rs_auth1","--rest","--oplogSize","2", "--keyFile", path+"key2"], {no_bind : true});
+conn.start();
+
+
+master.getSisterDB("admin").auth("foo", "bar");
+var config = master.getSisterDB("local").system.replset.findOne();
+config.members.push({_id : 3, host : getHostName()+":"+port[3]});
+config.version++;
+try {
+ master.adminCommand({replSetReconfig:config});
+}
+catch (e) {
+ print("error: "+e);
+}
+reconnect(master);
+master.getSisterDB("admin").auth("foo", "bar");
+
+
+print("shouldn't ever sync");
+for (var i = 0; i<30; i++) {
+ print("iteration: " +i);
+ var results = master.adminCommand({replSetGetStatus:1});
+ printjson(results);
+ assert(results.members[3].state != 2);
+ sleep(1000);
+}
+
+
+print("stop member");
+stopMongod(port[3]);
+
+
+print("start back up with correct key");
+conn = new MongodRunner(port[3], "/data/db/"+name+"-3", null, null, ["--replSet","rs_auth1","--rest","--oplogSize","2", "--keyFile", path+"key1"], {no_bind : true});
+conn.start();
+
+wait(function() {
+ var results = master.adminCommand({replSetGetStatus:1});
+ printjson(results);
+ return results.members[3].state == 2;
+ });
+
diff --git a/jstests/replsets/buildindexes.js b/jstests/replsets/buildindexes.js
new file mode 100644
index 0000000..76de797
--- /dev/null
+++ b/jstests/replsets/buildindexes.js
@@ -0,0 +1,86 @@
+doTest = function( signal ) {
+
+ var name = "buildIndexes";
+ var host = getHostName();
+
+ var replTest = new ReplSetTest( {name: name, nodes: 3} );
+
+ var nodes = replTest.startSet();
+
+ var config = replTest.getReplSetConfig();
+ config.members[2].priority = 0;
+ config.members[2].buildIndexes = false;
+
+ replTest.initiate(config);
+
+ var master = replTest.getMaster().getDB(name);
+ var slaveConns = replTest.liveNodes.slaves;
+ var slave = [];
+ for (var i in slaveConns) {
+ slaveConns[i].setSlaveOk();
+ slave.push(slaveConns[i].getDB(name));
+ }
+ replTest.awaitReplication();
+
+ print("creating an index on x");
+ master.x.ensureIndex({y : 1});
+ printjson(master.x.stats());
+
+ for (var i=0; i<100; i++) {
+ master.x.insert({x:1,y:"abc",c:1});
+ }
+
+ replTest.awaitReplication();
+
+ printjson(slave[0].runCommand({count: "x"}));
+ var ns = master.x+"";
+ print("namespace: "+ns);
+
+ // can't query system.indexes from slave, so we'll look at coll.stats()
+ printjson(slave[0].adminCommand({replSetGetStatus:1}));
+ printjson(slave[0].getSisterDB("local").system.replset.findOne());
+ printjson(master.stats());
+ printjson(slave[0].stats());
+ printjson(slave[1].stats());
+ printjson(master.x.stats());
+ printjson(slave[0].x.stats());
+ printjson(slave[1].x.stats());
+ print("sleeping");
+ sleep(20000);
+ var indexes = slave[0].stats().indexes;
+ assert.eq(indexes, 2, 'number of indexes');
+
+ indexes = slave[1].stats().indexes;
+ assert.eq(indexes, 1);
+
+
+ indexes = slave[0].x.stats().indexSizes;
+ printjson(indexes);
+
+ var count = 0;
+ for (var i in indexes) {
+ count++;
+ if (i == "_id_") {
+ continue;
+ }
+ print(i);
+ print(i.match(/y_/));
+ assert(i.match(/y_/));
+ }
+
+ assert.eq(count, 2);
+
+ indexes = slave[1].x.stats().indexSizes;
+ printjson(indexes);
+
+ count = 0;
+ for (var i in indexes) {
+ count++;
+ }
+
+ assert.eq(count, 1);
+
+ replTest.stopSet(15);
+}
+
+doTest(15);
diff --git a/jstests/replsets/cloneDb.js b/jstests/replsets/cloneDb.js
new file mode 100644
index 0000000..6d2d0f3
--- /dev/null
+++ b/jstests/replsets/cloneDb.js
@@ -0,0 +1,52 @@
+// Test for cloning a db from a replica set [SERVER-1643] -Tony
+
+load('jstests/libs/grid.js')
+
+doTest = function( signal ) {
+
+ var N = 2000
+
+ // ~1KB string
+ var Text = ''
+ for (var i = 0; i < 40; i++)
+ Text += 'abcdefghijklmnopqrstuvwxyz'
+
+ // Create replica set
+ var repset = new ReplicaSet ('testSet', 3) .begin()
+ var master = repset.getMaster()
+ var db1 = master.getDB('test')
+
+ // Insert data
+ for (var i = 0; i < N; i++) {
+ db1['foo'].insert({x: i, text: Text})
+ db1.getLastError(2) // wait to be copied to at least one secondary
+ }
+
+ // Create single server
+ var solo = new Server ('singleTarget')
+ var soloConn = solo.begin()
+ var db2 = soloConn.getDB('test')
+
+ // Clone db from replica set to single server
+ db2.cloneDatabase (repset.getURL())
+
+ // Confirm clone worked
+ assert.eq (Text, db2['foo'] .findOne({x: N-1}) ['text'], 'cloneDatabase failed (test1)')
+
+ // Now test the reverse direction
+ db1 = master.getDB('test2')
+ db2 = soloConn.getDB('test2')
+ for (var i = 0; i < N; i++) {
+ db2['foo'].insert({x: i, text: Text})
+ db2.getLastError()
+ }
+ db1.cloneDatabase (solo.host())
+ assert.eq (Text, db2['foo'] .findOne({x: N-1}) ['text'], 'cloneDatabase failed (test2)')
+
+ // Shut down replica set and single server
+ solo.end()
+ repset.stopSet( signal )
+}
+
+doTest( 15 );
+print("replsets/cloneDb.js SUCCESS");
diff --git a/jstests/replsets/config1.js b/jstests/replsets/config1.js
new file mode 100644
index 0000000..748ce8f
--- /dev/null
+++ b/jstests/replsets/config1.js
@@ -0,0 +1,21 @@
+doTest = function( signal ) {
+ var name = 'config1';
+
+ var replTest = new ReplSetTest( {name: name, nodes: 3} );
+ var nodes = replTest.startSet();
+
+ var config = replTest.getReplSetConfig();
+ config.settings = {"heartbeatSleep" : .5, heartbeatTimeout : .8};
+
+ replTest.initiate(config);
+
+ // Call getMaster to return a reference to the node that's been
+ // elected master.
+ var master = replTest.getMaster();
+
+ config = master.getDB("local").system.replset.findOne();
+ assert.eq(config.settings.heartbeatSleep, .5);
+ assert.eq(config.settings.heartbeatTimeout, .8);
+};
+
+doTest(15);
diff --git a/jstests/replsets/fastsync.js b/jstests/replsets/fastsync.js
new file mode 100644
index 0000000..d7c3905
--- /dev/null
+++ b/jstests/replsets/fastsync.js
@@ -0,0 +1,117 @@
+/*
+ * 1. insert 100000 objects
+ * 2. export to two dbpaths
+ * 3. add one node w/fastsync
+ * 4. check that we never get "errmsg" : "initial sync cloning db: whatever"
+ * 5. check writes are replicated
+ */
+
+var w = 0;
+var wait = function(f) {
+ w++;
+ var n = 0;
+ while (!f()) {
+ if( n % 4 == 0 )
+ print("toostale.js waiting " + w);
+ if (++n == 4) {
+ print("" + f);
+ }
+ assert(n < 200, 'tried 200 times, giving up');
+ sleep(1000);
+ }
+}
+
+var reconnect = function(a) {
+ wait(function() {
+ try {
+ a.getDB("foo").bar.stats();
+ return true;
+ } catch(e) {
+ print(e);
+ return false;
+ }
+ });
+};
+
+ports = allocatePorts( 3 );
+
+var basename = "jstests_fastsync";
+var basePath = "/data/db/" + basename;
+var hostname = getHostName();
+
+var pargs = new MongodRunner( ports[ 0 ], basePath + "-p", false, false,
+ ["--replSet", basename, "--oplogSize", 2],
+ {no_bind : true} );
+p = pargs.start();
+
+var admin = p.getDB("admin");
+var foo = p.getDB("foo");
+var local = p.getDB("local");
+
+var config = {_id : basename, members : [{_id : 0, host : hostname+":"+ports[0]}]};
+printjson(config);
+var result = admin.runCommand({replSetInitiate : config});
+print("result:");
+printjson(result);
+
+var count = 0;
+while (count < 10 && result.ok != 1) {
+ count++;
+ sleep(2000);
+ result = admin.runCommand({replSetInitiate : config});
+}
+
+assert(result.ok, tojson(result));
+assert.soon(function() { return admin.runCommand({isMaster:1}).ismaster; });
+
+print("1");
+for (var i=0; i<100000; i++) {
+ foo.bar.insert({date : new Date(), x : i, str : "all the talk on the market"});
+}
+print("total in foo: "+foo.bar.count());
+
+
+print("2");
+admin.runCommand( {fsync:1,lock:1} );
+copyDbpath( basePath + "-p", basePath + "-s" );
+admin.$cmd.sys.unlock.findOne();
+
+
+print("3");
+var sargs = new MongodRunner( ports[ 1 ], basePath + "-s", false, false,
+ ["--replSet", basename, "--fastsync",
+ "--oplogSize", 2], {no_bind : true} );
+var reuseData = true;
+sargs.start(reuseData);
+
+config = local.system.replset.findOne();
+config.version++;
+config.members.push({_id:1, host:hostname+":"+ports[1]});
+
+result = admin.runCommand({replSetReconfig : config});
+assert(result.ok, "reconfig worked");
+reconnect(p);
+
+print("4");
+var status = admin.runCommand({replSetGetStatus : 1});
+var count = 0;
+while (status.members[1].state != 2 && count < 200) {
+ print("not a secondary yet");
+ if (count % 10 == 0) {
+ printjson(status);
+ }
+ assert(!status.members[1].errmsg || !status.members[1].errmsg.match("^initial sync cloning db"));
+
+ sleep(1000);
+
+ // disconnection could happen here
+ try {
+ status = admin.runCommand({replSetGetStatus : 1});
+ }
+ catch (e) {
+ print(e);
+ }
+ count++;
+}
+
+assert.eq(status.members[1].state, 2);
diff --git a/jstests/replsets/getlasterror_w2.js b/jstests/replsets/getlasterror_w2.js
new file mode 100644
index 0000000..795e667
--- /dev/null
+++ b/jstests/replsets/getlasterror_w2.js
@@ -0,0 +1,36 @@
+// BUG: [SERVER-1768] replica set getlasterror {w: 2} after 2000
+// inserts hangs while secondary servers log "replSet error RS102 too stale to catch up" every once in a while
+
+function newReplicaSet (name, numServers) {
+ var rs = new ReplSetTest({name: name, nodes: numServers})
+ rs.startSet()
+ rs.initiate()
+ rs.awaitReplication()
+ return rs
+}
+
+function go() {
+var N = 2000
+
+// ~1KB string
+var Text = ''
+for (var i = 0; i < 40; i++)
+ Text += 'abcdefghijklmnopqrstuvwxyz'
+
+// Create replica set of 3 servers
+var repset = newReplicaSet('repset', 3)
+var conn = repset.getMaster()
+var db = conn.getDB('test')
+
+// Add data to it
+for (var i = 0; i < N; i++)
+ db['foo'].insert({x: i, text: Text})
+
+// wait to be copied to at least one secondary (BUG hangs here)
+db.getLastError(2)
+
+print('getlasterror_w2.js SUCCESS')
+}
+
+// turn off until fixed
+//go();
diff --git a/jstests/replsets/groupAndMapReduce.js b/jstests/replsets/groupAndMapReduce.js
new file mode 100644
index 0000000..539fe44
--- /dev/null
+++ b/jstests/replsets/groupAndMapReduce.js
@@ -0,0 +1,105 @@
+doTest = function( signal ) {
+
+ // Test basic replica set functionality.
+ // -- Replication
+ // -- Failover
+
+ // Replica set testing API
+ // Create a new replica set test. Specify set name and the number of nodes you want.
+ var replTest = new ReplSetTest( {name: 'testSet', nodes: 3} );
+
+ // call startSet() to start each mongod in the replica set
+ // this returns a list of nodes
+ var nodes = replTest.startSet();
+
+ // Call initiate() to send the replSetInitiate command
+ // This will wait for initiation
+ replTest.initiate();
+
+ // Call getMaster to return a reference to the node that's been
+ // elected master.
+ var master = replTest.getMaster();
+
+ // save some records
+ var len = 100
+ for (var i = 0; i < len; ++i) {
+ master.getDB("foo").foo.save({a: i});
+ }
+
+ // This method will check the oplogs of the master
+ // and slaves in the set and wait until the change has replicated.
+ replTest.awaitReplication();
+ print("Sleeping 10s for slaves to go to secondary state");
+ sleep(10000);
+
+ slaves = replTest.liveNodes.slaves;
+ assert( slaves.length == 2, "Expected 2 slaves but length was " + slaves.length );
+ slaves.forEach(function(slave) {
+ // try to read from slave
+ slave.slaveOk = true;
+ var count = slave.getDB("foo").foo.count();
+ printjson( count );
+ assert.eq( len , count , "slave count wrong: " + slave );
+
+ print("Doing a findOne to verify we can get a row");
+ var one = slave.getDB("foo").foo.findOne();
+ printjson(one);
+
+// stats = slave.getDB("foo").adminCommand({replSetGetStatus:1});
+// printjson(stats);
+
+ print("Calling group() with slaveOk=true, must succeed");
+ slave.slaveOk = true;
+ count = slave.getDB("foo").foo.group({initial: {n:0}, reduce: function(obj,out){out.n++;}});
+ printjson( count );
+ assert.eq( len , count[0].n , "slave group count wrong: " + slave );
+
+ print("Calling group() with slaveOk=false, must fail");
+ slave.slaveOk = false;
+ try {
+ count = slave.getDB("foo").foo.group({initial: {n:0}, reduce: function(obj,out){out.n++;}});
+ assert(false, "group() succeeded with slaveOk=false");
+ } catch (e) {
+ print("Received exception: " + e);
+ }
+
+ print("Calling inline mr() with slaveOk=true, must succeed");
+ slave.slaveOk = true;
+ map = function() { emit(this.a, 1); };
+ reduce = function(key, vals) { var sum = 0; for (var i = 0; i < vals.length; ++i) { sum += vals[i]; } return sum; };
+ slave.getDB("foo").foo.mapReduce(map, reduce, {out: { "inline" : 1}});
+
+ print("Calling mr() to collection with slaveOk=true, must fail");
+ try {
+ slave.getDB("foo").foo.mapReduce(map, reduce, "output");
+ assert(false, "mapReduce() to collection succeeded on slave");
+ } catch (e) {
+ print("Received exception: " + e);
+ }
+
+ print("Calling inline mr() with slaveOk=false, must fail");
+ slave.slaveOk = false;
+ try {
+ slave.getDB("foo").foo.mapReduce(map, reduce, {out: { "inline" : 1}});
+ assert(false, "mapReduce() succeeded on slave with slaveOk=false");
+ } catch (e) {
+ print("Received exception: " + e);
+ }
+ print("Calling mr() to collection with slaveOk=false, must fail");
+ try {
+ slave.getDB("foo").foo.mapReduce(map, reduce, "output");
+ assert(false, "mapReduce() to collection succeeded on slave with slaveOk=false");
+ } catch (e) {
+ print("Received exception: " + e);
+ }
+
+ });
+
+
+
+ // Shut down the set and finish the test.
+ replTest.stopSet( signal );
+}
+
+doTest( 15 );
+print("SUCCESS");
diff --git a/jstests/replsets/initial_sync1.js b/jstests/replsets/initial_sync1.js
new file mode 100644
index 0000000..ee30b4e
--- /dev/null
+++ b/jstests/replsets/initial_sync1.js
@@ -0,0 +1,129 @@
+/**
+ * Test killing the secondary during initially sync
+ *
+ * 1. Bring up set
+ * 2. Insert some data
+ * 4. Make sure synced
+ * 5. Freeze #2
+ * 6. Bring up #3
+ * 7. Kill #2 in the middle of syncing
+ * 8. Eventually it should become a secondary
+ * 9. Bring #2 back up
+ * 10. Insert some stuff
+ * 11. Everyone happy eventually
+ */
+
+load("jstests/replsets/rslib.js");
+var basename = "jstests_initsync1";
+
+
+print("1. Bring up set");
+var replTest = new ReplSetTest( {name: basename, nodes: 2} );
+var conns = replTest.startSet();
+replTest.initiate();
+
+var master = replTest.getMaster();
+var foo = master.getDB("foo");
+var admin = master.getDB("admin");
+
+var slave1 = replTest.liveNodes.slaves[0];
+var admin_s1 = slave1.getDB("admin");
+var local_s1 = slave1.getDB("local");
+
+print("2. Insert some data");
+for (var i=0; i<10000; i++) {
+ foo.bar.insert({date : new Date(), x : i, str : "all the talk on the market"});
+}
+print("total in foo: "+foo.bar.count());
+
+
+print("4. Make sure synced");
+replTest.awaitReplication();
+
+
+print("5. Freeze #2");
+admin_s1.runCommand({replSetFreeze:999999});
+
+
+print("6. Bring up #3");
+var ports = allocatePorts( 3 );
+var basePath = "/data/db/" + basename;
+var hostname = getHostName();
+
+var sargs = new MongodRunner( ports[ 2 ], basePath, false, false,
+ ["--replSet", basename, "--oplogSize", 2],
+ {no_bind : true} );
+var slave2 = sargs.start();
+var local_s2 = slave2.getDB("local");
+var admin_s2 = slave2.getDB("admin");
+
+var config = replTest.getReplSetConfig();
+config.version = 2;
+config.members.push({_id:2, host:hostname+":"+ports[2]});
+
+try {
+ admin.runCommand({replSetReconfig:config});
+}
+catch(e) {
+ print(e);
+}
+reconnect(slave1);
+reconnect(slave2);
+
+wait(function() {
+ var config2 = local_s1.system.replset.findOne();
+ var config3 = local_s2.system.replset.findOne();
+
+ printjson(config2);
+ printjson(config3);
+
+ return config2.version == config.version &&
+ (config3 && config3.version == config.version);
+ });
+
+wait(function() {
+ var status = admin_s2.runCommand({replSetGetStatus:1});
+ printjson(status);
+ return status.members &&
+ (status.members[2].state == 3 || status.members[2].state == 2);
+ });
+
+
+print("7. Kill #2 in the middle of syncing");
+replTest.stop(1);
+
+
+print("8. Eventually it should become a secondary");
+print("if initial sync has started, this will cause it to fail and sleep for 5 minutes");
+sleep(5*60*1000);
+wait(function() {
+ var status = admin_s2.runCommand({replSetGetStatus:1});
+ occasionally(function() { printjson(status); });
+ return status.members[2].state == 2;
+ });
+
+
+print("9. Bring #2 back up");
+replTest.start(1, {}, true);
+reconnect(slave1);
+wait(function() {
+ var status = admin_s1.runCommand({replSetGetStatus:1});
+ printjson(status);
+ return status.ok == 1 && status.members &&
+ status.members[1].state == 2 || status.members[1].state == 1;
+ });
+
+
+/**
+ * TODO: this fails on buildbot
+ * see SERVER-2550
+print("10. Insert some stuff");
+master = replTest.getMaster();
+for (var i=0; i<10000; i++) {
+ foo.bar.insert({date : new Date(), x : i, str : "all the talk on the market"});
+}
+
+
+print("11. Everyone happy eventually");
+replTest.awaitReplication();
+*/
diff --git a/jstests/replsets/initial_sync2.js b/jstests/replsets/initial_sync2.js
new file mode 100644
index 0000000..3ad3972
--- /dev/null
+++ b/jstests/replsets/initial_sync2.js
@@ -0,0 +1,179 @@
+/**
+ * Test killing the primary during initial sync
+ * and don't allow the other secondary to become primary
+ *
+ * 1. Bring up set
+ * 2. Insert some data
+ * 4. Make sure synced
+ * 5. Freeze #2
+ * 6. Bring up #3
+ * 7. Kill #1 in the middle of syncing
+ * 8. Check that #3 makes it into secondary state
+ * 9. Bring #1 back up
+ * 10. Initial sync should succeed
+ * 11. Insert some stuff
+ * 12. Everyone happy eventually
+ */
+
+load("jstests/replsets/rslib.js");
+var basename = "jstests_initsync2";
+
+var doTest = function() {
+
+print("1. Bring up set");
+var replTest = new ReplSetTest( {name: basename, nodes: 2} );
+var conns = replTest.startSet();
+replTest.initiate();
+
+var master = replTest.getMaster();
+var origMaster = master;
+var foo = master.getDB("foo");
+var admin = master.getDB("admin");
+
+var slave1 = replTest.liveNodes.slaves[0];
+var admin_s1 = slave1.getDB("admin");
+var local_s1 = slave1.getDB("local");
+
+print("2. Insert some data");
+for (var i=0; i<10000; i++) {
+ foo.bar.insert({date : new Date(), x : i, str : "all the talk on the market"});
+}
+print("total in foo: "+foo.bar.count());
+
+
+print("4. Make sure synced");
+replTest.awaitReplication();
+
+
+print("5. Freeze #2");
+admin_s1.runCommand({replSetFreeze:999999});
+
+
+print("6. Bring up #3");
+var ports = allocatePorts( 3 );
+var basePath = "/data/db/" + basename;
+var hostname = getHostName();
+
+var sargs = new MongodRunner( ports[ 2 ], basePath, false, false,
+ ["--replSet", basename, "--oplogSize", 2],
+ {no_bind : true} );
+var slave2 = sargs.start();
+var local_s2 = slave2.getDB("local");
+var admin_s2 = slave2.getDB("admin");
+
+var config = replTest.getReplSetConfig();
+config.version = 2;
+config.members.push({_id:2, host:hostname+":"+ports[2]});
+
+try {
+ admin.runCommand({replSetReconfig:config});
+}
+catch(e) {
+ print(e);
+}
+reconnect(slave1);
+reconnect(slave2);
+
+wait(function() {
+ var config2 = local_s1.system.replset.findOne();
+ var config3 = local_s2.system.replset.findOne();
+
+ printjson(config2);
+ printjson(config3);
+
+ return config2.version == config.version &&
+ (config3 && config3.version == config.version);
+ });
+admin_s2.runCommand({replSetFreeze:999999});
+
+
+wait(function() {
+ var status = admin_s2.runCommand({replSetGetStatus:1});
+ printjson(status);
+ return status.members &&
+ (status.members[2].state == 3 || status.members[2].state == 2);
+ });
+
+
+print("7. Kill #1 in the middle of syncing");
+replTest.stop(0);
+
+
+print("8. Check that #3 makes it into secondary state");
+wait(function() {
+ var status = admin_s2.runCommand({replSetGetStatus:1});
+ occasionally(function() { printjson(status);}, 10);
+ if (status.members[2].state == 2 || status.members[2].state == 1) {
+ return true;
+ }
+ return false;
+ });
+
+
+print("9. Bring #1 back up");
+replTest.start(0, {}, true);
+reconnect(master);
+wait(function() {
+ var status = admin.runCommand({replSetGetStatus:1});
+ printjson(status);
+ return status.members &&
+ (status.members[0].state == 1 || status.members[0].state == 2);
+ });
+
+
+print("10. Initial sync should succeed");
+wait(function() {
+ var status = admin_s2.runCommand({replSetGetStatus:1});
+ printjson(status);
+ return status.members &&
+ status.members[2].state == 2 || status.members[2].state == 1;
+ });
+
+
+print("11. Insert some stuff");
+// ReplSetTest doesn't find master correctly unless all nodes are defined by
+// ReplSetTest
+for (var i = 0; i<30; i++) {
+ var result = admin.runCommand({isMaster : 1});
+ if (result.ismaster) {
+ break;
+ }
+ else if (result.primary) {
+ master = connect(result.primary+"/admin").getMongo();
+ break;
+ }
+ sleep(1000);
+}
+
+for (var i=0; i<10000; i++) {
+ foo.bar.insert({date : new Date(), x : i, str : "all the talk on the market"});
+}
+
+
+print("12. Everyone happy eventually");
+// if 3 is master...
+if (master+"" != origMaster+"") {
+ print("3 is master");
+ slave2 = origMaster;
+}
+
+wait(function() {
+ var op1 = getLatestOp(master);
+ var op2 = getLatestOp(slave1);
+ var op3 = getLatestOp(slave2);
+
+ occasionally(function() {
+ print("latest ops:");
+ printjson(op1);
+ printjson(op2);
+ printjson(op3);
+ });
+
+ return friendlyEqual(getLatestOp(master), getLatestOp(slave1)) &&
+ friendlyEqual(getLatestOp(master), getLatestOp(slave2));
+ });
+
+replTest.stopSet();
+};
+
+doTest();
diff --git a/jstests/replsets/initial_sync3.js b/jstests/replsets/initial_sync3.js
new file mode 100644
index 0000000..471aa16
--- /dev/null
+++ b/jstests/replsets/initial_sync3.js
@@ -0,0 +1,87 @@
+/* test initial sync options
+ *
+ * {state : 1}
+ * {state : 2}
+ * {name : host+":"+port}
+ * {_id : 2}
+ * {optime : now}
+ * {optime : 1970}
+ */
+
+load("jstests/replsets/rslib.js");
+var name = "initialsync3";
+var host = getHostName();
+var port = allocatePorts(7);
+
+print("Start set with three nodes");
+var replTest = new ReplSetTest( {name: name, nodes: 7} );
+var nodes = replTest.startSet();
+replTest.initiate({
+ _id : name,
+ members : [
+ {_id:0, host : host+":"+port[0]},
+ {_id:1, host : host+":"+port[1], initialSync : {state : 1}},
+ {_id:2, host : host+":"+port[2], initialSync : {state : 2}},
+ {_id:3, host : host+":"+port[3], initialSync : {name : host+":"+port[2]}},
+ {_id:4, host : host+":"+port[4], initialSync : {_id : 2}},
+ {_id:5, host : host+":"+port[5], initialSync : {optime : new Date()}},
+ {_id:6, host : host+":"+port[6], initialSync : {optime : new Date(0)}}
+ ]});
+
+var master = replTest.getMaster();
+
+print("Initial sync");
+master.getDB("foo").bar.baz.insert({x:1});
+
+print("Make sure everyone's secondary");
+wait(function() {
+ var status = master.getDB("admin").runCommand({replSetGetStatus:1});
+ occasionally(function() {
+ printjson(status);
+ });
+
+ if (!status.members) {
+ return false;
+ }
+
+ for (i=0; i<7; i++) {
+ if (status.members[i].state != 1 && status.members[i].state != 2) {
+ return false;
+ }
+ }
+ return true;
+
+ });
+
+replTest.awaitReplication();
+
+replTest.stopSet();
+
+print("reconfig");
+
+var rs2 = new ReplSetTest( {name: 'reconfig-isync3', nodes: 3} );
+rs2.startSet();
+rs2.initiate();
+
+master = rs2.getMaster();
+var config = master.getDB("local").system.replset.findOne();
+config.version++;
+config.members[0].initialSync = {state : 2};
+config.members[1].initialSync = {state : 1};
+try {
+ master.getDB("admin").runCommand({replSetReconfig : config});
+}
+catch(e) {
+ print("trying to reconfigure: "+e);
+}
+
+master = rs2.getMaster();
+config = master.getDB("local").system.replset.findOne();
+
+assert(typeof(config.members[0].initialSync) == "object");
+assert.eq(config.members[0].initialSync.state, 2);
+assert.eq(config.members[1].initialSync.state, 1);
+
+rs2.stopSet();
+
+print("initialSync3 success!");
diff --git a/jstests/replsets/ismaster1.js b/jstests/replsets/ismaster1.js
new file mode 100644
index 0000000..22865e5
--- /dev/null
+++ b/jstests/replsets/ismaster1.js
@@ -0,0 +1,36 @@
+/**
+ * 1. Check passive field in isMaster
+ */
+
+load("jstests/replsets/rslib.js");
+
+var name = "ismaster";
+var host = getHostName();
+
+var replTest = new ReplSetTest( {name: name, nodes: 3} );
+
+var nodes = replTest.startSet();
+
+var config = replTest.getReplSetConfig();
+config.members[1].priority = 0;
+config.members[2].priority = 0;
+
+replTest.initiate(config);
+
+var master = replTest.getMaster();
+wait(function() {
+ var result = master.getDB("admin").runCommand({replSetGetStatus:1});
+ return result.members && result.members[0].state == 1 &&
+ result.members[1].state == 2 && result.members[2].state == 2;
+ });
+
+var result = master.getDB("admin").runCommand({isMaster:1});
+assert(!('passive' in result), tojson(result));
+
+result = replTest.liveNodes.slaves[0].getDB("admin").runCommand({isMaster:1});
+assert('passive' in result, tojson(result));
+
+result = replTest.liveNodes.slaves[1].getDB("admin").runCommand({isMaster:1});
+assert('passive' in result, tojson(result));
+
+replTest.stopSet();
diff --git a/jstests/replsets/key1 b/jstests/replsets/key1
new file mode 100644
index 0000000..b5c19e4
--- /dev/null
+++ b/jstests/replsets/key1
@@ -0,0 +1 @@
+foop de doop
diff --git a/jstests/replsets/key2 b/jstests/replsets/key2
new file mode 100644
index 0000000..cbde821
--- /dev/null
+++ b/jstests/replsets/key2
@@ -0,0 +1 @@
+other key
diff --git a/jstests/replsets/remove1.js b/jstests/replsets/remove1.js
new file mode 100644
index 0000000..ebd17d6
--- /dev/null
+++ b/jstests/replsets/remove1.js
@@ -0,0 +1,132 @@
+/* test removing a node from a replica set
+ *
+ * Start set with three nodes
+ * Initial sync
+ * Remove slave1
+ * Remove slave2
+ * Bring slave1 back up
+ * Bring slave2 back up
+ * Add them back as slave
+ * Make sure everyone's secondary
+ */
+
+load("jstests/replsets/rslib.js");
+var name = "removeNodes";
+var host = getHostName();
+
+
+print("Start set with three nodes");
+var replTest = new ReplSetTest( {name: name, nodes: 3} );
+var nodes = replTest.startSet();
+replTest.initiate();
+var master = replTest.getMaster();
+
+
+print("Initial sync");
+master.getDB("foo").bar.baz.insert({x:1});
+
+replTest.awaitReplication();
+
+
+print("Remove slave2");
+var config = replTest.getReplSetConfig();
+
+config.members.pop();
+config.version = 2;
+try {
+ master.getDB("admin").runCommand({replSetReconfig:config});
+}
+catch(e) {
+ print(e);
+}
+reconnect(master);
+
+
+print("Remove slave1");
+config.members.pop();
+config.version = 3;
+try {
+ master.getDB("admin").runCommand({replSetReconfig:config});
+}
+catch(e) {
+ print(e);
+}
+reconnect(master);
+
+print("sleeping 1");
+sleep(10000);
+// these are already down, but this clears their ports from memory so that they
+// can be restarted later
+stopMongod(replTest.getPort(1));
+stopMongod(replTest.getPort(2));
+
+
+print("Bring slave1 back up");
+var paths = [ replTest.getPath(1), replTest.getPath(2) ];
+var ports = allocatePorts(2, replTest.getPort(2)+1);
+var args = ["mongod", "--port", ports[0], "--dbpath", paths[0], "--noprealloc", "--smallfiles", "--rest"];
+var conn = startMongoProgram.apply( null, args );
+conn.getDB("local").system.replset.remove();
+printjson(conn.getDB("local").runCommand({getlasterror:1}));
+print(conn);
+print("sleeping 2");
+sleep(10000);
+stopMongod(ports[0]);
+
+replTest.restart(1);
+
+
+print("Bring slave2 back up");
+args[2] = ports[1];
+args[4] = paths[1];
+conn = startMongoProgram.apply( null, args );
+conn.getDB("local").system.replset.remove();
+print("path: "+paths[1]);
+print("sleeping 3");
+sleep(10000);
+stopMongod(ports[1]);
+
+replTest.restart(2);
+sleep(10000);
+
+
+print("Add them back as slaves");
+config.members.push({_id:1, host : host+":"+replTest.getPort(1)});
+config.members.push({_id:2, host : host+":"+replTest.getPort(2)});
+config.version = 4;
+wait(function() {
+ try {
+ master.getDB("admin").runCommand({replSetReconfig:config});
+ }
+ catch(e) {
+ print(e);
+ }
+ reconnect(master);
+
+ master.setSlaveOk();
+ var newConfig = master.getDB("local").system.replset.findOne();
+ return newConfig.version == 4;
+ });
+
+
+print("Make sure everyone's secondary");
+wait(function() {
+ var status = master.getDB("admin").runCommand({replSetGetStatus:1});
+ occasionally(function() {
+ printjson(status);
+ });
+
+ if (!status.members || status.members.length != 3) {
+ return false;
+ }
+
+ for (var i = 0; i<3; i++) {
+ if (status.members[i].state != 1 && status.members[i].state != 2) {
+ return false;
+ }
+ }
+ return true;
+ });
+
+replTest.stopSet();
+
diff --git a/jstests/replsets/replset2.js b/jstests/replsets/replset2.js
index f18b467..4849620 100644
--- a/jstests/replsets/replset2.js
+++ b/jstests/replsets/replset2.js
@@ -1,126 +1,126 @@
-print("\n\nreplset2.js BEGIN");
-
-doTest = function (signal) {
-
- // FAILING TEST
- // See below:
-
- // Test replication with getLastError
-
- // Replica set testing API
- // Create a new replica set test. Specify set name and the number of nodes you want.
- var replTest = new ReplSetTest({ name: 'testSet', nodes: 3, oplogSize: 5 });
-
- // call startSet() to start each mongod in the replica set
- // this returns a list of nodes
- var nodes = replTest.startSet();
-
- // Call initiate() to send the replSetInitiate command
- // This will wait for initiation
- replTest.initiate();
-
- var testDB = "repl-test";
-
- // Call getMaster to return a reference to the node that's been
- // elected master.
- var master = replTest.getMaster();
-
- // Wait for replication to a single node
- master.getDB(testDB).bar.insert({ n: 1 });
-
- // Wait for initial sync
- replTest.awaitReplication();
-
- var slaves = replTest.liveNodes.slaves;
- slaves.forEach(function (slave) { slave.setSlaveOk(); });
-
- var failed = false;
- var callGetLastError = function (w, timeout, db) {
- try {
- var result = master.getDB(db).getLastErrorObj(w, timeout);
- print("replset2.js getLastError result: " + tojson(result));
- if (result['ok'] != 1) {
- print("replset2.js FAILURE getlasterror not ok");
- failed = true;
- }
- }
- catch (e) {
- print("\nreplset2.js exception in getLastError: " + e + '\n');
- throw e;
- }
- }
-
- // Test getlasterror with multiple inserts
- // TEST FAILS HEREg
- print("\n\nreplset2.js **** Try inserting a multiple records -- first insert ****")
-
- printjson(master.getDB("admin").runCommand("replSetGetStatus"));
-
- master.getDB(testDB).foo.insert({ n: 1 });
- master.getDB(testDB).foo.insert({ n: 2 });
- master.getDB(testDB).foo.insert({ n: 3 });
-
- print("\nreplset2.js **** TEMP 1 ****")
-
- printjson(master.getDB("admin").runCommand("replSetGetStatus"));
-
- callGetLastError(3, 25000, testDB);
-
- print("replset2.js **** TEMP 1a ****")
-
- m1 = master.getDB(testDB).foo.findOne({ n: 1 });
- printjson(m1);
- assert(m1['n'] == 1, "replset2.js Failed to save to master on multiple inserts");
-
- print("replset2.js **** TEMP 1b ****")
-
- var s0 = slaves[0].getDB(testDB).foo.findOne({ n: 1 });
- assert(s0['n'] == 1, "replset2.js Failed to replicate to slave 0 on multiple inserts");
-
- var s1 = slaves[1].getDB(testDB).foo.findOne({ n: 1 });
- assert(s1['n'] == 1, "replset2.js Failed to replicate to slave 1 on multiple inserts");
-
- // Test getlasterror with a simple insert
- print("replset2.js **** Try inserting a single record ****")
- master.getDB(testDB).dropDatabase();
- master.getDB(testDB).foo.insert({ n: 1 });
- callGetLastError(3, 10000, testDB);
-
- m1 = master.getDB(testDB).foo.findOne({ n: 1 });
- printjson(m1);
- assert(m1['n'] == 1, "replset2.js Failed to save to master");
-
- s0 = slaves[0].getDB(testDB).foo.findOne({ n: 1 });
- assert(s0['n'] == 1, "replset2.js Failed to replicate to slave 0");
-
- s1 = slaves[1].getDB(testDB).foo.findOne({ n: 1 });
- assert(s1['n'] == 1, "replset2.js Failed to replicate to slave 1");
-
- // Test getlasterror with large insert
- print("replset2.js **** Try inserting many records ****")
+print("\n\nreplset2.js BEGIN");
+
+doTest = function (signal) {
+
+ // FAILING TEST
+ // See below:
+
+ // Test replication with getLastError
+
+ // Replica set testing API
+ // Create a new replica set test. Specify set name and the number of nodes you want.
+ var replTest = new ReplSetTest({ name: 'testSet', nodes: 3, oplogSize: 5 });
+
+ // call startSet() to start each mongod in the replica set
+ // this returns a list of nodes
+ var nodes = replTest.startSet();
+
+ // Call initiate() to send the replSetInitiate command
+ // This will wait for initiation
+ replTest.initiate();
+
+ var testDB = "repl-test";
+
+ // Call getMaster to return a reference to the node that's been
+ // elected master.
+ var master = replTest.getMaster();
+
+ // Wait for replication to a single node
+ master.getDB(testDB).bar.insert({ n: 1 });
+
+ // Wait for initial sync
+ replTest.awaitReplication();
+
+ var slaves = replTest.liveNodes.slaves;
+ slaves.forEach(function (slave) { slave.setSlaveOk(); });
+
+ var failed = false;
+ var callGetLastError = function (w, timeout, db) {
+ try {
+ var result = master.getDB(db).getLastErrorObj(w, timeout);
+ print("replset2.js getLastError result: " + tojson(result));
+ if (result['ok'] != 1) {
+ print("replset2.js FAILURE getlasterror not ok");
+ failed = true;
+ }
+ }
+ catch (e) {
+ print("\nreplset2.js exception in getLastError: " + e + '\n');
+ throw e;
+ }
+ }
+
+ // Test getlasterror with multiple inserts
+ // TEST FAILS HEREg
+ print("\n\nreplset2.js **** Try inserting a multiple records -- first insert ****")
+
+ printjson(master.getDB("admin").runCommand("replSetGetStatus"));
+
+ master.getDB(testDB).foo.insert({ n: 1 });
+ master.getDB(testDB).foo.insert({ n: 2 });
+ master.getDB(testDB).foo.insert({ n: 3 });
+
+ print("\nreplset2.js **** TEMP 1 ****")
+
+ printjson(master.getDB("admin").runCommand("replSetGetStatus"));
+
+ callGetLastError(3, 25000, testDB);
+
+ print("replset2.js **** TEMP 1a ****")
+
+ m1 = master.getDB(testDB).foo.findOne({ n: 1 });
+ printjson(m1);
+ assert(m1['n'] == 1, "replset2.js Failed to save to master on multiple inserts");
+
+ print("replset2.js **** TEMP 1b ****")
+
+ var s0 = slaves[0].getDB(testDB).foo.findOne({ n: 1 });
+ assert(s0['n'] == 1, "replset2.js Failed to replicate to slave 0 on multiple inserts");
+
+ var s1 = slaves[1].getDB(testDB).foo.findOne({ n: 1 });
+ assert(s1['n'] == 1, "replset2.js Failed to replicate to slave 1 on multiple inserts");
+
+ // Test getlasterror with a simple insert
+ print("replset2.js **** Try inserting a single record ****")
+ master.getDB(testDB).dropDatabase();
+ master.getDB(testDB).foo.insert({ n: 1 });
+ callGetLastError(3, 10000, testDB);
+
+ m1 = master.getDB(testDB).foo.findOne({ n: 1 });
+ printjson(m1);
+ assert(m1['n'] == 1, "replset2.js Failed to save to master");
+
+ s0 = slaves[0].getDB(testDB).foo.findOne({ n: 1 });
+ assert(s0['n'] == 1, "replset2.js Failed to replicate to slave 0");
+
+ s1 = slaves[1].getDB(testDB).foo.findOne({ n: 1 });
+ assert(s1['n'] == 1, "replset2.js Failed to replicate to slave 1");
+
+ // Test getlasterror with large insert
+ print("replset2.js **** Try inserting many records ****")
try {
- bigData = new Array(2000).toString()
- for (var n = 0; n < 1000; n++) {
- master.getDB(testDB).baz.insert({ n: n, data: bigData });
- }
- callGetLastError(3, 60000, testDB);
-
- print("replset2.js **** V1 ")
-
- var verifyReplication = function (nodeName, collection) {
- data = collection.findOne({ n: 1 });
- assert(data['n'] == 1, "replset2.js Failed to save to " + nodeName);
- data = collection.findOne({ n: 999 });
- assert(data['n'] == 999, "replset2.js Failed to save to " + nodeName);
- }
-
- print("replset2.js **** V2 ")
-
- verifyReplication("master", master.getDB(testDB).baz);
- verifyReplication("slave 0", slaves[0].getDB(testDB).baz);
- verifyReplication("slave 1", slaves[1].getDB(testDB).baz);
-
- assert(failed == false, "replset2.js Replication with getLastError failed. See errors.");
+ bigData = new Array(2000).toString()
+ for (var n = 0; n < 1000; n++) {
+ master.getDB(testDB).baz.insert({ n: n, data: bigData });
+ }
+ callGetLastError(3, 60000, testDB);
+
+ print("replset2.js **** V1 ")
+
+ var verifyReplication = function (nodeName, collection) {
+ data = collection.findOne({ n: 1 });
+ assert(data['n'] == 1, "replset2.js Failed to save to " + nodeName);
+ data = collection.findOne({ n: 999 });
+ assert(data['n'] == 999, "replset2.js Failed to save to " + nodeName);
+ }
+
+ print("replset2.js **** V2 ")
+
+ verifyReplication("master", master.getDB(testDB).baz);
+ verifyReplication("slave 0", slaves[0].getDB(testDB).baz);
+ verifyReplication("slave 1", slaves[1].getDB(testDB).baz);
+
+ assert(failed == false, "replset2.js Replication with getLastError failed. See errors.");
}
catch(e) {
print("ERROR: " + e);
@@ -132,10 +132,10 @@ doTest = function (signal) {
printjson(slaves[1].getDB("local").oplog.rs.find().sort({"$natural": -1}).limit(1).next());
}
-
- replTest.stopSet(signal);
+
+ replTest.stopSet(signal);
}
-doTest( 15 );
-
+doTest( 15 );
+
print("\nreplset2.js SUCCESS\n");
diff --git a/jstests/replsets/replset3.js b/jstests/replsets/replset3.js
index 8126b9d..faa0627 100644
--- a/jstests/replsets/replset3.js
+++ b/jstests/replsets/replset3.js
@@ -1,56 +1,80 @@
-
-doTest = function( signal ) {
-
- // Test replica set step down
-
- // Replica set testing API
- // Create a new replica set test. Specify set name and the number of nodes you want.
- var replTest = new ReplSetTest( {name: 'testSet', nodes: 3} );
-
- // call startSet() to start each mongod in the replica set
- // this returns a list of nodes
- var nodes = replTest.startSet();
-
- // Call initiate() to send the replSetInitiate command
- // This will wait for initiation
- replTest.initiate();
-
- // Get master node
- var master = replTest.getMaster();
-
- // Write some data to master
- // NOTE: this test fails unless we write some data.
- master.getDB("foo").foo.save({a: 1});
- master.getDB("foo").runCommand({getlasterror: 1, w:3, wtimeout: 20000});
-
- // Step down master
- master.getDB("admin").runCommand({replSetStepDown: true});
-
- try {
- var new_master = replTest.getMaster();
- }
- catch( err ) {
- throw( "Could not elect new master before timeout." );
- }
-
- assert( master != new_master, "Old master shouldn't be equal to new master." );
-
- // Make sure that slaves are still up
- var result = new_master.getDB("admin").runCommand({replSetGetStatus: 1});
- assert( result['ok'] == 1, "Could not verify that slaves were still up:" + result );
-
- slaves = replTest.liveNodes.slaves;
- assert.soon(function() {
- res = slaves[0].getDB("admin").runCommand({replSetGetStatus: 1})
- return res.myState == 2;
- }, "Slave 0 state not ready.");
-
- assert.soon(function() {
- res = slaves[1].getDB("admin").runCommand({replSetGetStatus: 1})
- return res.myState == 2;
- }, "Slave 1 state not ready.");
-
- replTest.stopSet( 15 );
+
+doTest = function (signal) {
+
+ // Test replica set step down
+
+ // Replica set testing API
+ // Create a new replica set test. Specify set name and the number of nodes you want.
+ var replTest = new ReplSetTest({ name: 'testSet', nodes: 3 });
+
+ // call startSet() to start each mongod in the replica set
+ // this returns a list of nodes
+ var nodes = replTest.startSet();
+
+ // Call initiate() to send the replSetInitiate command
+ // This will wait for initiation
+ replTest.initiate();
+
+ // Get master node
+ var master = replTest.getMaster();
+
+ // Write some data to master
+ // NOTE: this test fails unless we write some data.
+ master.getDB("foo").foo.save({ a: 1 });
+ master.getDB("foo").runCommand({ getlasterror: 1, w: 3, wtimeout: 20000 });
+
+ var phase = 1;
+
+ print(phase++);
+
+ // Step down master. Note: this may close our connection!
+ try {
+ master.getDB("admin").runCommand({ replSetStepDown: true });
+ } catch (err) {
+ print("caught: " + err + " on stepdown");
+ }
+
+ print(phase++);
+
+ try {
+ var new_master = replTest.getMaster();
+ }
+ catch (err) {
+ throw ("Could not elect new master before timeout.");
+ }
+
+ print(phase++);
+
+ assert(master != new_master, "Old master shouldn't be equal to new master.");
+
+ print(phase++);
+
+ // Make sure that slaves are still up
+ var result = new_master.getDB("admin").runCommand({ replSetGetStatus: 1 });
+ assert(result['ok'] == 1, "Could not verify that slaves were still up:" + result);
+
+ print(phase++);
+
+ slaves = replTest.liveNodes.slaves;
+ assert.soon(function () {
+ try {
+ res = slaves[0].getDB("admin").runCommand({ replSetGetStatus: 1 })
+ } catch (err) { }
+ return res.myState == 2;
+ }, "Slave 0 state not ready.");
+
+ print(phase++);
+
+ assert.soon(function () {
+ try {
+ res = slaves[1].getDB("admin").runCommand({ replSetGetStatus: 1 })
+ } catch (err) { }
+ return res.myState == 2;
+ }, "Slave 1 state not ready.");
+
+ print("replset3.js SUCCESS");
+
+ replTest.stopSet(15);
}
doTest( 15 );
diff --git a/jstests/replsets/replset5.js b/jstests/replsets/replset5.js
index fe1761e..13ee5c9 100644
--- a/jstests/replsets/replset5.js
+++ b/jstests/replsets/replset5.js
@@ -23,15 +23,15 @@ doTest = function (signal) {
master.getDB("barDB").bar.save({ a: 1 });
replTest.awaitReplication();
- // These writes should be replicated immediately
- master.getDB(testDB).foo.insert({ n: 1 });
- master.getDB(testDB).foo.insert({ n: 2 });
- master.getDB(testDB).foo.insert({ n: 3 });
-
- // *** NOTE ***: The default doesn't seem to be propogating.
- // When I run getlasterror with no defaults, the slaves don't have the data:
- // These getlasterror commands can be run individually to verify this.
- //master.getDB("admin").runCommand({ getlasterror: 1, w: 3, wtimeout: 20000 });
+ // These writes should be replicated immediately
+ var docNum = 5000;
+ for(var n=0; n<docNum; n++) {
+ master.getDB(testDB).foo.insert({ n: n });
+ }
+
+ // If you want to test failure, just add values for w and wtimeout
+ // to the following command. This will override the default set above and
+ // prevent replication from happening in time for the count tests below.
master.getDB("admin").runCommand({getlasterror: 1});
var slaves = replTest.liveNodes.slaves;
@@ -40,31 +40,15 @@ doTest = function (signal) {
print("Testing slave counts");
- // These should all have 3 documents, but they don't always.
- var master1count = master.getDB(testDB).foo.count();
- assert( master1count == 3, "Master has " + master1count + " of 3 documents!");
-
var slave0count = slaves[0].getDB(testDB).foo.count();
- assert( slave0count == 3, "Slave 0 has " + slave0count + " of 3 documents!");
+ assert( slave0count == docNum, "Slave 0 has " + slave0count + " of " + docNum + " documents!");
var slave1count = slaves[1].getDB(testDB).foo.count();
- assert( slave1count == 3, "Slave 1 has " + slave1count + " of 3 documents!");
+ assert( slave1count == docNum, "Slave 1 has " + slave1count + " of " + docNum + " documents!");
- print("Testing slave 0");
+ var master1count = master.getDB(testDB).foo.count();
+ assert( master1count == docNum, "Master has " + master1count + " of " + docNum + " documents!");
- var s0 = slaves[0].getDB(testDB).foo.find();
- assert(s0.next()['n']);
- assert(s0.next()['n']);
- assert(s0.next()['n']);
-
- print("Testing slave 1");
-
- var s1 = slaves[1].getDB(testDB).foo.find();
- assert(s1.next()['n']);
- assert(s1.next()['n']);
- assert(s1.next()['n']);
-
- // End test
replTest.stopSet(signal);
}
diff --git a/jstests/replsets/replset_remove_node.js b/jstests/replsets/replset_remove_node.js
index fcb754c..9fef721 100644
--- a/jstests/replsets/replset_remove_node.js
+++ b/jstests/replsets/replset_remove_node.js
@@ -33,8 +33,15 @@ doTest = function( signal ) {
config.version = c.version + 1;
config.members = [ { "_id" : 0, "host" : replTest.host + ":31000" },
{ "_id" : 2, "host" : replTest.host + ":31002" } ]
- replTest.initiate( config , 'replSetReconfig' );
+ try {
+ replTest.initiate( config , 'replSetReconfig' );
+ }
+ catch(e) {
+ print(e);
+ }
+
+
// Make sure that a new master comes up
master = replTest.getMaster();
slaves = replTest.liveNodes.slaves;
diff --git a/jstests/replsets/replsetarb2.js b/jstests/replsets/replsetarb2.js
index 0dd8a3d..0e4c791 100644
--- a/jstests/replsets/replsetarb2.js
+++ b/jstests/replsets/replsetarb2.js
@@ -29,6 +29,8 @@ doTest = function( signal ) {
master.getDB("foo").foo.insert({a: "foo"});
replTest.awaitReplication();
+ assert( ! conns[1].getDB( "admin" ).runCommand( "ismaster" ).secondary , "arbiter shouldn't be secondary" )
+
// Now kill the original master
mId = replTest.getNodeId( master );
replTest.stop( mId );
diff --git a/jstests/replsets/replsetarb3.js b/jstests/replsets/replsetarb3.js
new file mode 100644
index 0000000..1193cf2
--- /dev/null
+++ b/jstests/replsets/replsetarb3.js
@@ -0,0 +1,144 @@
+// @file replsetarb3.js
+// try turning arbiters into non-arbiters and vice versa
+
+/*
+ * 1: initialize set
+ * 2: check m3.state == 7
+ * 3: reconfig
+ * 4: check m3.state == 2
+ * 5: reconfig
+ * 6: check m3.state == 7
+ * 7: reconfig
+ * 8: check m3.state == 2
+ * 9: insert 10000
+ * 10: reconfig
+ * 11: check m3.state == 7
+ */
+
+var debug = false;
+
+var statusSoon = function(s) {
+ assert.soon(function() {
+ var status = master.getDB("admin").runCommand({ replSetGetStatus: 1 });
+ if (debug)
+ printjson(status);
+ return status.members[2].state == s;
+ });
+};
+
+var w = 0;
+var wait = function(f) {
+ w++;
+ var n = 0;
+ while (!f()) {
+ if( n % 4 == 0 )
+ print("toostale.js waiting " + w);
+ if (++n == 4) {
+ print("" + f);
+ }
+ assert(n < 200, 'tried 200 times, giving up');
+ sleep(1000);
+ }
+}
+
+var reconnect = function(a) {
+ wait(function() {
+ try {
+ a.getDB("foo").bar.stats();
+ return true;
+ } catch(e) {
+ print(e);
+ return false;
+ }
+ });
+};
+
+var reconfig = function() {
+ config.version++;
+ try {
+ var result = master.getDB("admin").runCommand({replSetReconfig : config});
+ }
+ catch(e) {
+ print(e);
+ }
+ reconnect(master);
+ reconnect(replTest.liveNodes.slaves[1]);
+ sleep(20000);
+};
+
+var replTest = new ReplSetTest( {name: 'unicomplex', nodes: 3} );
+var nodes = replTest.nodeList();
+
+print(tojson(nodes));
+
+
+var conns = replTest.startSet();
+
+print("1");
+var config = {"_id" : "unicomplex", "members" : [
+ {"_id" : 0, "host" : nodes[0] },
+ {"_id" : 1, "host" : nodes[1] },
+ {"_id" : 2, "host" : nodes[2], "arbiterOnly" : true}]};
+var r = replTest.initiate(config);
+config.version = 1;
+
+var master = replTest.getMaster();
+
+// Wait for initial replication
+master.getDB("foo").foo.insert({a: "foo"});
+replTest.awaitReplication();
+
+
+print("2");
+statusSoon(7);
+assert.eq(replTest.liveNodes.slaves[1].getDB("local").oplog.rs.count(), 0);
+
+/*
+print("3");
+delete config.members[2].arbiterOnly;
+reconfig();
+
+
+print("4");
+statusSoon(2);
+assert(replTest.liveNodes.slaves[1].getDB("local").oplog.rs.count() > 0);
+
+
+print("5");
+config.members[2].arbiterOnly = true;
+reconfig();
+
+
+print("6");
+statusSoon(7);
+assert.eq(replTest.liveNodes.slaves[1].getDB("local").oplog.rs.count(), 0);
+
+
+print("7");
+delete config.members[2].arbiterOnly;
+reconfig();
+
+
+print("8");
+statusSoon(2);
+assert(replTest.liveNodes.slaves[1].getDB("local").oplog.rs.count() > 0);
+
+
+print("9");
+for (var i = 0; i < 10000; i++) {
+ master.getDB("foo").bar.insert({increment : i, c : 0, foo : "kasdlfjaklsdfalksdfakldfmalksdfmaklmfalkfmkafmdsaklfma", date : new Date(), d : Date()});
+}
+
+
+print("10");
+config.members[2].arbiterOnly = true;
+reconfig();
+
+
+print("11");
+statusSoon(7);
+assert.eq(replTest.liveNodes.slaves[1].getDB("local").oplog.rs.count(), 0);
+*/
+
+replTest.stopSet( 15 );
+
diff --git a/jstests/replsets/replsetfreeze.js b/jstests/replsets/replsetfreeze.js
new file mode 100644
index 0000000..3721ba5
--- /dev/null
+++ b/jstests/replsets/replsetfreeze.js
@@ -0,0 +1,105 @@
+/*
+ * 1: initialize set
+ * 2: step down m1
+ * 3: freeze set for 30 seconds
+ * 4: check no one is master for 30 seconds
+ * 5: check for new master
+ * 6: step down new master
+ * 7: freeze for 30 seconds
+ * 8: unfreeze
+ * 9: check we get a new master within 30 seconds
+ */
+
+
+var w = 0;
+var wait = function(f) {
+ w++;
+ var n = 0;
+ while (!f()) {
+ if( n % 4 == 0 )
+ print("toostale.js waiting " + w);
+ if (++n == 4) {
+ print("" + f);
+ }
+ assert(n < 200, 'tried 200 times, giving up');
+ sleep(1000);
+ }
+}
+
+var reconnect = function(a) {
+ wait(function() {
+ try {
+ a.getDB("foo").bar.stats();
+ return true;
+ } catch(e) {
+ print(e);
+ return false;
+ }
+ });
+};
+
+
+print("1: initialize set");
+var replTest = new ReplSetTest( {name: 'unicomplex', nodes: 3} );
+var nodes = replTest.nodeList();
+var conns = replTest.startSet();
+var config = {"_id" : "unicomplex", "members" : [
+ {"_id" : 0, "host" : nodes[0] },
+ {"_id" : 1, "host" : nodes[1] },
+ {"_id" : 2, "host" : nodes[2], "arbiterOnly" : true}]};
+var r = replTest.initiate(config);
+var master = replTest.getMaster();
+
+
+print("2: step down m1");
+try {
+ master.getDB("admin").runCommand({replSetStepDown : 1});
+}
+catch(e) {
+ print(e);
+}
+reconnect(master);
+
+print("3: freeze set for 30 seconds");
+master.getDB("admin").runCommand({replSetFreeze : 30});
+
+
+print("4: check no one is master for 30 seconds");
+var start = (new Date()).getTime();
+while ((new Date()).getTime() - start < 30000) {
+ var result = master.getDB("admin").runCommand({isMaster:1});
+ assert.eq(result.ismaster, false);
+ assert.eq(result.primary, undefined);
+ sleep(1000);
+}
+
+
+print("5: check for new master");
+master = replTest.getMaster();
+
+
+print("6: step down new master");
+try {
+ master.getDB("admin").runCommand({replSetStepDown : 1});
+}
+catch(e) {
+ print(e);
+}
+reconnect(master);
+
+
+print("7: freeze for 30 seconds");
+master.getDB("admin").runCommand({replSetFreeze : 30});
+sleep(1000);
+
+
+print("8: unfreeze");
+master.getDB("admin").runCommand({replSetFreeze : 0});
+
+
+print("9: check we get a new master within 30 seconds");
+master = replTest.getMaster();
+
+
+replTest.stopSet( 15 );
+
diff --git a/jstests/replsets/rollback.js b/jstests/replsets/rollback.js
index 8840371..6370e41 100644
--- a/jstests/replsets/rollback.js
+++ b/jstests/replsets/rollback.js
@@ -1,155 +1,186 @@
-// test rollback in replica sets
-
-// try running as :
-//
-// mongo --nodb rollback.js | tee out | grep -v ^m31
-//
-
-var debugging = 0;
-
-function pause(s) {
- print(s);
- while (debugging) {
- sleep(3000);
- print(s);
- }
-}
-
-function deb(obj) {
- if( debugging ) {
- print("\n\n\n" + obj + "\n\n");
- }
-}
-
-w = 0;
-
-function wait(f) {
- w++;
- var n = 0;
- while (!f()) {
- if( n % 4 == 0 )
- print("rollback.js waiting " + w);
- if (++n == 4) {
- print("" + f);
- }
- sleep(1000);
- }
-}
-
-doTest = function (signal) {
-
- var replTest = new ReplSetTest({ name: 'unicomplex', nodes: 3 });
- var nodes = replTest.nodeList();
- //print(tojson(nodes));
-
- var conns = replTest.startSet();
- var r = replTest.initiate({ "_id": "unicomplex",
- "members": [
- { "_id": 0, "host": nodes[0] },
- { "_id": 1, "host": nodes[1] },
- { "_id": 2, "host": nodes[2], arbiterOnly: true}]
- });
-
- // Make sure we have a master
- var master = replTest.getMaster();
- a_conn = conns[0];
- A = a_conn.getDB("admin");
- b_conn = conns[1];
- a_conn.setSlaveOk();
- b_conn.setSlaveOk();
- B = b_conn.getDB("admin");
- assert(master == conns[0], "conns[0] assumed to be master");
- assert(a_conn == master);
-
- //deb(master);
-
- // Make sure we have an arbiter
- assert.soon(function () {
- res = conns[2].getDB("admin").runCommand({ replSetGetStatus: 1 });
- return res.myState == 7;
- }, "Arbiter failed to initialize.");
-
- // Wait for initial replication
- var a = a_conn.getDB("foo");
- var b = b_conn.getDB("foo");
-
- /* force the oplog to roll */
- if (new Date() % 2 == 0) {
- print("ROLLING OPLOG AS PART OF TEST (we only do this sometimes)");
- var pass = 1;
- var first = a.getSisterDB("local").oplog.rs.find().sort({ $natural: 1 }).limit(1)[0];
- a.roll.insert({ x: 1 });
- while (1) {
- for (var i = 0; i < 10000; i++)
- a.roll.update({}, { $inc: { x: 1} });
- var op = a.getSisterDB("local").oplog.rs.find().sort({ $natural: 1 }).limit(1)[0];
- if (tojson(op.h) != tojson(first.h)) {
- printjson(op);
- printjson(first);
- break;
- }
- pass++;
- a.getLastError(2); // unlikely secondary isn't keeping up, but let's avoid possible intermittent issues with that.
- }
- print("PASSES FOR OPLOG ROLL: " + pass);
- }
- else {
- print("NO ROLL");
- }
-
- a.bar.insert({ q: 1, a: "foo" });
- a.bar.insert({ q: 2, a: "foo", x: 1 });
- a.bar.insert({ q: 3, bb: 9, a: "foo" });
-
- assert(a.bar.count() == 3, "t.count");
-
- // wait for secondary to get this data
- wait(function () { return b.bar.count() == 3; });
-
- A.runCommand({ replSetTest: 1, blind: true });
- wait(function () { return B.isMaster().ismaster; });
-
- b.bar.insert({ q: 4 });
- b.bar.insert({ q: 5 });
- b.bar.insert({ q: 6 });
- assert(b.bar.count() == 6, "u.count");
-
- // a should not have the new data as it was in blind state.
- B.runCommand({ replSetTest: 1, blind: true });
- A.runCommand({ replSetTest: 1, blind: false });
- wait(function () { return !B.isMaster().ismaster; });
- wait(function () { return A.isMaster().ismaster; });
-
- assert(a.bar.count() == 3, "t is 3");
- a.bar.insert({ q: 7 });
- a.bar.insert({ q: 8 });
- {
- assert(a.bar.count() == 5);
- var x = a.bar.find().toArray();
- assert(x[0].q == 1, '1');
- assert(x[1].q == 2, '2');
- assert(x[2].q == 3, '3');
- assert(x[3].q == 7, '7');
- assert(x[4].q == 8, '8');
- }
-
- // A is 1 2 3 7 8
- // B is 1 2 3 4 5 6
-
- // bring B back online
- B.runCommand({ replSetTest: 1, blind: false });
-
- wait(function () { return B.isMaster().ismaster || B.isMaster().secondary; });
-
- // everyone is up here...
- assert(A.isMaster().ismaster || A.isMaster().secondary, "A up");
- assert(B.isMaster().ismaster || B.isMaster().secondary, "B up");
-
- friendlyEqual(a.bar.find().sort({ _id: 1 }).toArray(), b.bar.find().sort({ _id: 1 }).toArray(), "server data sets do not match");
-
- pause("rollback.js SUCCESS");
- replTest.stopSet(signal);
+// test rollback in replica sets
+
+// try running as :
+//
+// mongo --nodb rollback.js | tee out | grep -v ^m31
+//
+
+var debugging = 0;
+
+function pause(s) {
+ print(s);
+ while (debugging) {
+ sleep(3000);
+ print(s);
+ }
+}
+
+function deb(obj) {
+ if( debugging ) {
+ print("\n\n\n" + obj + "\n\n");
+ }
+}
+
+w = 0;
+
+function wait(f) {
+ w++;
+ var n = 0;
+ while (!f()) {
+ if( n % 4 == 0 )
+ print("rollback.js waiting " + w);
+ if (++n == 4) {
+ print("" + f);
+ }
+ assert(n < 200, 'tried 200 times, giving up');
+ sleep(1000);
+ }
}
+doTest = function (signal) {
+
+ var replTest = new ReplSetTest({ name: 'unicomplex', nodes: 3 });
+ var nodes = replTest.nodeList();
+ //print(tojson(nodes));
+
+ var conns = replTest.startSet();
+ var r = replTest.initiate({ "_id": "unicomplex",
+ "members": [
+ { "_id": 0, "host": nodes[0] },
+ { "_id": 1, "host": nodes[1] },
+ { "_id": 2, "host": nodes[2], arbiterOnly: true}]
+ });
+
+ // Make sure we have a master
+ var master = replTest.getMaster();
+ a_conn = conns[0];
+ A = a_conn.getDB("admin");
+ b_conn = conns[1];
+ a_conn.setSlaveOk();
+ b_conn.setSlaveOk();
+ B = b_conn.getDB("admin");
+ assert(master == conns[0], "conns[0] assumed to be master");
+ assert(a_conn == master);
+
+ //deb(master);
+
+ // Make sure we have an arbiter
+ assert.soon(function () {
+ res = conns[2].getDB("admin").runCommand({ replSetGetStatus: 1 });
+ return res.myState == 7;
+ }, "Arbiter failed to initialize.");
+
+ // Wait for initial replication
+ var a = a_conn.getDB("foo");
+ var b = b_conn.getDB("foo");
+
+ /* force the oplog to roll */
+ if (new Date() % 2 == 0) {
+ print("ROLLING OPLOG AS PART OF TEST (we only do this sometimes)");
+ var pass = 1;
+ var first = a.getSisterDB("local").oplog.rs.find().sort({ $natural: 1 }).limit(1)[0];
+ a.roll.insert({ x: 1 });
+ while (1) {
+ for (var i = 0; i < 10000; i++)
+ a.roll.update({}, { $inc: { x: 1} });
+ var op = a.getSisterDB("local").oplog.rs.find().sort({ $natural: 1 }).limit(1)[0];
+ if (tojson(op.h) != tojson(first.h)) {
+ printjson(op);
+ printjson(first);
+ break;
+ }
+ pass++;
+ a.getLastError(2); // unlikely secondary isn't keeping up, but let's avoid possible intermittent issues with that.
+ }
+ print("PASSES FOR OPLOG ROLL: " + pass);
+ }
+ else {
+ print("NO ROLL");
+ }
+
+ a.bar.insert({ q: 1, a: "foo" });
+ a.bar.insert({ q: 2, a: "foo", x: 1 });
+ a.bar.insert({ q: 3, bb: 9, a: "foo" });
+
+ assert(a.bar.count() == 3, "t.count");
+
+ // wait for secondary to get this data
+ wait(function () { return b.bar.count() == 3; });
+
+ A.runCommand({ replSetTest: 1, blind: true });
+ reconnect(a,b);
+ wait(function () { return B.isMaster().ismaster; });
+
+ b.bar.insert({ q: 4 });
+ b.bar.insert({ q: 5 });
+ b.bar.insert({ q: 6 });
+ assert(b.bar.count() == 6, "u.count");
+
+ // a should not have the new data as it was in blind state.
+ B.runCommand({ replSetTest: 1, blind: true });
+ print("*************** wait for server to reconnect ****************");
+ reconnect(a,b);
+ A.runCommand({ replSetTest: 1, blind: false });
+ reconnect(a,b);
+
+ print("*************** B ****************");
+ wait(function () { try { return !B.isMaster().ismaster; } catch(e) { return false; } });
+ print("*************** A ****************");
+ reconnect(a,b);
+ wait(function () {
+ try {
+ return A.isMaster().ismaster;
+ } catch(e) {
+ return false;
+ }
+ });
+
+ assert(a.bar.count() == 3, "t is 3");
+ a.bar.insert({ q: 7 });
+ a.bar.insert({ q: 8 });
+ {
+ assert(a.bar.count() == 5);
+ var x = a.bar.find().toArray();
+ assert(x[0].q == 1, '1');
+ assert(x[1].q == 2, '2');
+ assert(x[2].q == 3, '3');
+ assert(x[3].q == 7, '7');
+ assert(x[4].q == 8, '8');
+ }
+
+ // A is 1 2 3 7 8
+ // B is 1 2 3 4 5 6
+
+ // bring B back online
+ B.runCommand({ replSetTest: 1, blind: false });
+ reconnect(a,b);
+
+ wait(function () { return B.isMaster().ismaster || B.isMaster().secondary; });
+
+ // everyone is up here...
+ assert(A.isMaster().ismaster || A.isMaster().secondary, "A up");
+ assert(B.isMaster().ismaster || B.isMaster().secondary, "B up");
+ replTest.awaitReplication();
+
+ friendlyEqual(a.bar.find().sort({ _id: 1 }).toArray(), b.bar.find().sort({ _id: 1 }).toArray(), "server data sets do not match");
+
+ pause("rollback.js SUCCESS");
+ replTest.stopSet(signal);
+};
+
+
+var reconnect = function(a,b) {
+ wait(function() {
+ try {
+ a.bar.stats();
+ b.bar.stats();
+ return true;
+ } catch(e) {
+ print(e);
+ return false;
+ }
+ });
+};
+
print("rollback.js");
doTest( 15 );
diff --git a/jstests/replsets/rollback2.js b/jstests/replsets/rollback2.js
index 483d221..46fb548 100644
--- a/jstests/replsets/rollback2.js
+++ b/jstests/replsets/rollback2.js
@@ -1,201 +1,232 @@
-// test rollback in replica sets
-
-// try running as :
-//
-// mongo --nodb rollback.js | tee out | grep -v ^m31
-//
-
-var debugging = 0;
-
-function pause(s) {
- print(s);
- while (debugging) {
- sleep(3000);
- print(s);
- }
-}
-
-function deb(obj) {
- if( debugging ) {
- print("\n\n\n" + obj + "\n\n");
- }
-}
-
-w = 0;
-
-function wait(f) {
- w++;
- var n = 0;
- while (!f()) {
- if (n % 4 == 0)
- print("rollback2.js waiting " + w);
- if (++n == 4) {
- print("" + f);
- }
- sleep(1000);
- }
-}
-
-function dbs_match(a, b) {
- print("dbs_match");
-
- var ac = a.system.namespaces.find().sort({name:1}).toArray();
- var bc = b.system.namespaces.find().sort({name:1}).toArray();
- if (!friendlyEqual(ac, bc)) {
- print("dbs_match: namespaces don't match");
- print("\n\n");
- printjson(ac);
- print("\n\n");
- printjson(bc);
- print("\n\n");
- return false;
- }
-
- var c = a.getCollectionNames();
- for( var i in c ) {
- print("checking " + c[i]);
- if( !friendlyEqual( a[c[i]].find().sort({_id:1}).toArray(), b[c[i]].find().sort({_id:1}).toArray() ) ) {
- print("dbs_match: collections don't match " + c[i]);
- return false;
- }
- }
- return true;
-}
-
-/* these writes will be initial data and replicate everywhere. */
-function doInitialWrites(db) {
- t = db.bar;
- t.insert({ q:0});
- t.insert({ q: 1, a: "foo" });
- t.insert({ q: 2, a: "foo", x: 1 });
- t.insert({ q: 3, bb: 9, a: "foo" });
- t.insert({ q: 40, a: 1 });
- t.insert({ q: 40, a: 2 });
- t.insert({ q: 70, txt: 'willremove' });
-
- db.createCollection("kap", { capped: true, size: 5000 });
- db.kap.insert({ foo: 1 })
-
- // going back to empty on capped is a special case and must be tested
- db.createCollection("kap2", { capped: true, size: 5501 });
-}
-
-/* these writes on one primary only and will be rolled back. */
-function doItemsToRollBack(db) {
- t = db.bar;
- t.insert({ q: 4 });
- t.update({ q: 3 }, { q: 3, rb: true });
-
- t.remove({ q: 40 }); // multi remove test
-
- t.update({ q: 2 }, { q: 39, rb: true });
-
- // rolling back a delete will involve reinserting the item(s)
- t.remove({ q: 1 });
-
- t.update({ q: 0 }, { $inc: { y: 1} });
-
- db.kap.insert({ foo: 2 })
- db.kap2.insert({ foo: 2 })
-
- // create a collection (need to roll back the whole thing)
- db.newcoll.insert({ a: true });
-
- // create a new empty collection (need to roll back the whole thing)
- db.createCollection("abc");
-}
-
-function doWritesToKeep2(db) {
- t = db.bar;
- t.insert({ txt: 'foo' });
- t.remove({ q: 70 });
- t.update({ q: 0 }, { $inc: { y: 33} });
-}
-
-function verify(db) {
- print("verify");
- t = db.bar;
- assert(t.find({ q: 1 }).count() == 1);
- assert(t.find({ txt: 'foo' }).count() == 1);
- assert(t.find({ q: 4 }).count() == 0);
-}
-
-doTest = function (signal) {
-
- var replTest = new ReplSetTest({ name: 'unicomplex', nodes: 3 });
- var nodes = replTest.nodeList();
- //print(tojson(nodes));
-
- var conns = replTest.startSet();
- var r = replTest.initiate({ "_id": "unicomplex",
- "members": [
- { "_id": 0, "host": nodes[0] },
- { "_id": 1, "host": nodes[1] },
- { "_id": 2, "host": nodes[2], arbiterOnly: true}]
- });
-
- // Make sure we have a master
- var master = replTest.getMaster();
- a_conn = conns[0];
- A = a_conn.getDB("admin");
- b_conn = conns[1];
- a_conn.setSlaveOk();
- b_conn.setSlaveOk();
- B = b_conn.getDB("admin");
- assert(master == conns[0], "conns[0] assumed to be master");
- assert(a_conn == master);
-
- //deb(master);
-
- // Make sure we have an arbiter
- assert.soon(function () {
- res = conns[2].getDB("admin").runCommand({ replSetGetStatus: 1 });
- return res.myState == 7;
- }, "Arbiter failed to initialize.");
-
- // Wait for initial replication
- var a = a_conn.getDB("foo");
- var b = b_conn.getDB("foo");
- doInitialWrites(a);
-
- // wait for secondary to get this data
- wait(function () { return b.bar.count() == a.bar.count(); });
-
- A.runCommand({ replSetTest: 1, blind: true });
- wait(function () { return B.isMaster().ismaster; });
-
- doItemsToRollBack(b);
-
- // a should not have the new data as it was in blind state.
- B.runCommand({ replSetTest: 1, blind: true });
- A.runCommand({ replSetTest: 1, blind: false });
- wait(function () { return !B.isMaster().ismaster; });
- wait(function () { return A.isMaster().ismaster; });
-
- assert(a.bar.count() >= 1, "count check");
- doWritesToKeep2(a);
-
- // A is 1 2 3 7 8
- // B is 1 2 3 4 5 6
-
- // bring B back online
- // as A is primary, B will roll back and then catch up
- B.runCommand({ replSetTest: 1, blind: false });
-
- wait(function () { return B.isMaster().ismaster || B.isMaster().secondary; });
-
- // everyone is up here...
- assert(A.isMaster().ismaster || A.isMaster().secondary, "A up");
- assert(B.isMaster().ismaster || B.isMaster().secondary, "B up");
-
- verify(a);
-
- assert( dbs_match(a,b), "server data sets do not match after rollback, something is wrong");
-
- pause("rollback2.js SUCCESS");
- replTest.stopSet(signal);
+// a test of rollback in replica sets
+//
+// try running as :
+//
+// mongo --nodb rollback2.js | tee out | grep -v ^m31
+//
+
+var debugging = 0;
+
+function pause(s) {
+ print(s);
+ while (debugging) {
+ sleep(3000);
+ print(s);
+ }
+}
+
+function deb(obj) {
+ if( debugging ) {
+ print("\n\n\n" + obj + "\n\n");
+ }
+}
+
+w = 0;
+
+function wait(f) {
+ w++;
+ var n = 0;
+ while (!f()) {
+ if (n % 4 == 0)
+ print("rollback2.js waiting " + w);
+ if (++n == 4) {
+ print("" + f);
+ }
+ assert(n < 200, 'tried 200 times, giving up');
+ sleep(1000);
+ }
+}
+
+function dbs_match(a, b) {
+ print("dbs_match");
+
+ var ac = a.system.namespaces.find().sort({name:1}).toArray();
+ var bc = b.system.namespaces.find().sort({name:1}).toArray();
+ if (!friendlyEqual(ac, bc)) {
+ print("dbs_match: namespaces don't match");
+ print("\n\n");
+ printjson(ac);
+ print("\n\n");
+ printjson(bc);
+ print("\n\n");
+ return false;
+ }
+
+ var c = a.getCollectionNames();
+ for( var i in c ) {
+ print("checking " + c[i]);
+ if( !friendlyEqual( a[c[i]].find().sort({_id:1}).toArray(), b[c[i]].find().sort({_id:1}).toArray() ) ) {
+ print("dbs_match: collections don't match " + c[i]);
+ return false;
+ }
+ }
+ return true;
+}
+
+/* these writes will be initial data and replicate everywhere. */
+function doInitialWrites(db) {
+ t = db.bar;
+ t.insert({ q:0});
+ t.insert({ q: 1, a: "foo" });
+ t.insert({ q: 2, a: "foo", x: 1 });
+ t.insert({ q: 3, bb: 9, a: "foo" });
+ t.insert({ q: 40, a: 1 });
+ t.insert({ q: 40, a: 2 });
+ t.insert({ q: 70, txt: 'willremove' });
+
+ db.createCollection("kap", { capped: true, size: 5000 });
+ db.kap.insert({ foo: 1 })
+
+ // going back to empty on capped is a special case and must be tested
+ db.createCollection("kap2", { capped: true, size: 5501 });
+}
+
+/* these writes on one primary only and will be rolled back. */
+function doItemsToRollBack(db) {
+ t = db.bar;
+ t.insert({ q: 4 });
+ t.update({ q: 3 }, { q: 3, rb: true });
+
+ t.remove({ q: 40 }); // multi remove test
+
+ t.update({ q: 2 }, { q: 39, rb: true });
+
+ // rolling back a delete will involve reinserting the item(s)
+ t.remove({ q: 1 });
+
+ t.update({ q: 0 }, { $inc: { y: 1} });
+
+ db.kap.insert({ foo: 2 })
+ db.kap2.insert({ foo: 2 })
+
+ // create a collection (need to roll back the whole thing)
+ db.newcoll.insert({ a: true });
+
+ // create a new empty collection (need to roll back the whole thing)
+ db.createCollection("abc");
}
+function doWritesToKeep2(db) {
+ t = db.bar;
+ t.insert({ txt: 'foo' });
+ t.remove({ q: 70 });
+ t.update({ q: 0 }, { $inc: { y: 33} });
+}
+
+function verify(db) {
+ print("verify");
+ t = db.bar;
+ assert(t.find({ q: 1 }).count() == 1);
+ assert(t.find({ txt: 'foo' }).count() == 1);
+ assert(t.find({ q: 4 }).count() == 0);
+}
+
+doTest = function (signal) {
+
+ var replTest = new ReplSetTest({ name: 'unicomplex', nodes: 3 });
+ var nodes = replTest.nodeList();
+ //print(tojson(nodes));
+
+ var conns = replTest.startSet();
+ var r = replTest.initiate({ "_id": "unicomplex",
+ "members": [
+ { "_id": 0, "host": nodes[0] },
+ { "_id": 1, "host": nodes[1] },
+ { "_id": 2, "host": nodes[2], arbiterOnly: true}]
+ });
+
+ // Make sure we have a master
+ var master = replTest.getMaster();
+ a_conn = conns[0];
+ A = a_conn.getDB("admin");
+ b_conn = conns[1];
+ a_conn.setSlaveOk();
+ b_conn.setSlaveOk();
+ B = b_conn.getDB("admin");
+ assert(master == conns[0], "conns[0] assumed to be master");
+ assert(a_conn == master);
+
+ //deb(master);
+
+ // Make sure we have an arbiter
+ assert.soon(function () {
+ res = conns[2].getDB("admin").runCommand({ replSetGetStatus: 1 });
+ return res.myState == 7;
+ }, "Arbiter failed to initialize.");
+
+ // Wait for initial replication
+ var a = a_conn.getDB("foo");
+ var b = b_conn.getDB("foo");
+ wait(function () {
+ var status = A.runCommand({replSetGetStatus : 1});
+ return status.members[1].state == 2;
+ });
+
+ doInitialWrites(a);
+
+ // wait for secondary to get this data
+ wait(function () { return b.bar.count() == a.bar.count(); });
+ wait(function () {
+ var status = A.runCommand({replSetGetStatus : 1});
+ return status.members[1].state == 2;
+ });
+
+
+ A.runCommand({ replSetTest: 1, blind: true });
+ reconnect(a, b);
+
+ wait(function () { return B.isMaster().ismaster; });
+
+ doItemsToRollBack(b);
+
+ // a should not have the new data as it was in blind state.
+ B.runCommand({ replSetTest: 1, blind: true });
+ reconnect(a, b);
+ A.runCommand({ replSetTest: 1, blind: false });
+ reconnect(a,b);
+
+ wait(function () { try { return !B.isMaster().ismaster; } catch(e) { return false; } });
+ wait(function () { try { return A.isMaster().ismaster; } catch(e) { return false; } });
+
+ assert(a.bar.count() >= 1, "count check");
+ doWritesToKeep2(a);
+
+ // A is 1 2 3 7 8
+ // B is 1 2 3 4 5 6
+
+ // bring B back online
+ // as A is primary, B will roll back and then catch up
+ B.runCommand({ replSetTest: 1, blind: false });
+ reconnect(a,b);
+
+ wait(function () { return B.isMaster().ismaster || B.isMaster().secondary; });
+
+ // everyone is up here...
+ assert(A.isMaster().ismaster || A.isMaster().secondary, "A up");
+ assert(B.isMaster().ismaster || B.isMaster().secondary, "B up");
+ replTest.awaitReplication();
+
+ verify(a);
+
+ assert( dbs_match(a,b), "server data sets do not match after rollback, something is wrong");
+
+ pause("rollback2.js SUCCESS");
+ replTest.stopSet(signal);
+};
+
+var reconnect = function(a,b) {
+ wait(function() {
+ try {
+ a.bar.stats();
+ b.bar.stats();
+ return true;
+ } catch(e) {
+ print(e);
+ return false;
+ }
+ });
+};
+
print("rollback2.js");
doTest( 15 );
diff --git a/jstests/replsets/rollback3.js b/jstests/replsets/rollback3.js
index 5c2f2f1..fa923d8 100755
--- a/jstests/replsets/rollback3.js
+++ b/jstests/replsets/rollback3.js
@@ -30,10 +30,10 @@ function wait(f) {
if (n % 4 == 0)
print("rollback3.js waiting " + w);
if (++n == 4) {
- print("" + f);
- }
- if (n == 200) {
- print("rollback3.js failing waited too long");
+ print("" + f);
+ }
+ if (n == 200) {
+ print("rollback3.js failing waited too long");
throw "wait error";
}
sleep(1000);
@@ -188,15 +188,20 @@ doTest = function (signal) {
wait(function () { return b.bar.count() == a.bar.count(); });
A.runCommand({ replSetTest: 1, blind: true });
- wait(function () { return B.isMaster().ismaster; });
+ reconnect(a,b);
+ wait(function () { try { return B.isMaster().ismaster; } catch(e) { return false; } });
doItemsToRollBack(b);
// a should not have the new data as it was in blind state.
B.runCommand({ replSetTest: 1, blind: true });
+ reconnect(a,b);
+
A.runCommand({ replSetTest: 1, blind: false });
- wait(function () { return !B.isMaster().ismaster; });
- wait(function () { return A.isMaster().ismaster; });
+ reconnect(a,b);
+
+ wait(function () { try { return !B.isMaster().ismaster; } catch(e) { return false; } });
+ wait(function () { try { return A.isMaster().ismaster; } catch(e) { return false; } });
assert(a.bar.count() >= 1, "count check");
doWritesToKeep2(a);
@@ -207,18 +212,34 @@ doTest = function (signal) {
// bring B back online
// as A is primary, B will roll back and then catch up
B.runCommand({ replSetTest: 1, blind: false });
+ reconnect(a,b);
wait(function () { return B.isMaster().ismaster || B.isMaster().secondary; });
// everyone is up here...
assert(A.isMaster().ismaster || A.isMaster().secondary, "A up");
assert(B.isMaster().ismaster || B.isMaster().secondary, "B up");
-
+ replTest.awaitReplication();
+
assert( dbs_match(a,b), "server data sets do not match after rollback, something is wrong");
pause("rollback3.js SUCCESS");
replTest.stopSet(signal);
-}
+};
+
+
+var reconnect = function(a,b) {
+ wait(function() {
+ try {
+ a.bar.stats();
+ b.bar.stats();
+ return true;
+ } catch(e) {
+ print(e);
+ return false;
+ }
+ });
+};
print("rollback3.js");
doTest( 15 );
diff --git a/jstests/replsets/rslib.js b/jstests/replsets/rslib.js
new file mode 100644
index 0000000..c072829
--- /dev/null
+++ b/jstests/replsets/rslib.js
@@ -0,0 +1,63 @@
+
+var count = 0;
+var w = 0;
+
+var wait = function(f) {
+ w++;
+ var n = 0;
+ while (!f()) {
+ if( n % 4 == 0 )
+ print("waiting " + w);
+ if (++n == 4) {
+ print("" + f);
+ }
+ assert(n < 200, 'tried 200 times, giving up');
+ sleep(1000);
+ }
+};
+
+/**
+ * Use this to do something once every 4 iterations.
+ *
+ * <pre>
+ * for (i=0; i<1000; i++) {
+ * occasionally(function() { print("4 more iterations"); });
+ * }
+ * </pre>
+ */
+var occasionally = function(f, n) {
+ var interval = n || 4;
+ if (count % interval == 0) {
+ f();
+ }
+ count++;
+};
+
+var reconnect = function(a) {
+ wait(function() {
+ try {
+ // make this work with either dbs or connections
+ if (typeof(a.getDB) == "function") {
+ a.getDB("foo").bar.stats();
+ }
+ else {
+ a.bar.stats();
+ }
+ return true;
+ } catch(e) {
+ print(e);
+ return false;
+ }
+ });
+};
+
+
+var getLatestOp = function(server) {
+ server.getDB("admin").getMongo().setSlaveOk();
+ var log = server.getDB("local")['oplog.rs'];
+ var cursor = log.find({}).sort({'$natural': -1}).limit(1);
+ if (cursor.hasNext()) {
+ return cursor.next();
+ }
+ return null;
+};
diff --git a/jstests/replsets/slaveDelay2.js b/jstests/replsets/slaveDelay2.js
new file mode 100644
index 0000000..2d9dd1f
--- /dev/null
+++ b/jstests/replsets/slaveDelay2.js
@@ -0,0 +1,104 @@
+
+var name = "slaveDelay2";
+var host = getHostName();
+
+var waitForAllMembers = function(master) {
+ var ready = false;
+
+ outer:
+ while (true) {
+ var state = master.getSisterDB("admin").runCommand({replSetGetStatus:1});
+
+ for (var m in state.members) {
+ if (state.members[m].state != 2 && state.members[m].state != 1) {
+ sleep(10000);
+ continue outer;
+ }
+ }
+
+ printjson(state);
+ print("okay, everyone is primary or secondary");
+ return;
+ }
+};
+
+
+var initialize = function() {
+ var replTest = new ReplSetTest( {name: name, nodes: 1} );
+
+ var nodes = replTest.startSet();
+
+ replTest.initiate();
+
+ var master = replTest.getMaster().getDB(name);
+
+ waitForAllMembers(master);
+
+ return replTest;
+};
+
+var populate = function(master) {
+ // insert records
+ for (var i =0; i<1000; i++) {
+ master.foo.insert({_id:1});
+ }
+
+ master.runCommand({getlasterror:1});
+}
+
+doTest = function( signal ) {
+ var replTest = initialize();
+ var master = replTest.getMaster().getDB(name);
+ populate(master);
+ var admin = master.getSisterDB("admin");
+
+ /**
+ * start a slave with a long delay (1 hour) and do some writes while it is
+ * initializing. Make sure it syncs all of these writes before going into
+ * syncDelay.
+ */
+ var conn = startMongodTest(31008, name + "-sd", 0, { useHostname: true, replSet: name });
+ conn.setSlaveOk();
+
+ config = master.getSisterDB("local").system.replset.findOne();
+ config.version++;
+ config.members.push({_id : 1, host : host+":31008",priority:0, slaveDelay:3600});
+ var ok = admin.runCommand({replSetReconfig : config});
+ assert.eq(ok.ok,1);
+
+ // do inserts during initial sync
+ count = 0;
+ while (count < 10) {
+ for (var i = 100*count; i<100*(count+1); i++) {
+ master.foo.insert({x:i});
+ }
+
+ //check if initial sync is done
+ var state = master.getSisterDB("admin").runCommand({replSetGetStatus:1});
+ printjson(state);
+ if (state.members[1].state == 2) {
+ break;
+ }
+
+ count++;
+ }
+
+ // throw out last 100 inserts, but make sure the others were applied
+ if (count == 0) {
+ print("NOTHING TO CHECK");
+ replTest.stopSet();
+ return;
+ }
+
+ // wait a bit for the syncs to be applied
+ waitForAllMembers(master);
+
+ for (var i=0; i<(100*count); i++) {
+ var obj = conn.getDB(name).foo.findOne({x : i});
+ assert(obj);
+ }
+
+ replTest.stopSet();
+}
+
+doTest(15);
diff --git a/jstests/replsets/slavedelay1.js b/jstests/replsets/slavedelay1.js
new file mode 100644
index 0000000..e549822
--- /dev/null
+++ b/jstests/replsets/slavedelay1.js
@@ -0,0 +1,127 @@
+
+var waitForAllMembers = function(master) {
+ var ready = false;
+
+ outer:
+ while (true) {
+ var state = master.getSisterDB("admin").runCommand({replSetGetStatus:1});
+ printjson(state);
+
+ for (var m in state.members) {
+ if (state.members[m].state != 2 && state.members[m].state != 1) {
+ sleep(10000);
+ continue outer;
+ }
+ }
+ return;
+ }
+};
+
+
+doTest = function( signal ) {
+
+ var name = "slaveDelay";
+ var host = getHostName();
+
+ var replTest = new ReplSetTest( {name: name, nodes: 3} );
+
+ var nodes = replTest.startSet();
+
+ /* set slaveDelay to 30 seconds */
+ var config = replTest.getReplSetConfig();
+ config.members[2].priority = 0;
+ config.members[2].slaveDelay = 30;
+
+ replTest.initiate(config);
+
+ var master = replTest.getMaster().getDB(name);
+ var slaveConns = replTest.liveNodes.slaves;
+ var slave = [];
+ for (var i in slaveConns) {
+ var d = slaveConns[i].getDB(name);
+ d.getMongo().setSlaveOk();
+ slave.push(d);
+ }
+
+ waitForAllMembers(master);
+
+ // insert a record
+ master.foo.insert({x:1});
+ master.runCommand({getlasterror:1, w:2});
+
+ var doc = master.foo.findOne();
+ assert.eq(doc.x, 1);
+
+ // make sure slave has it
+ var doc = slave[0].foo.findOne();
+ assert.eq(doc.x, 1);
+
+ // make sure delayed slave doesn't have it
+ assert.eq(slave[1].foo.findOne(), null);
+
+ // wait 35 seconds
+ sleep(35000);
+
+ // now delayed slave should have it
+ assert.eq(slave[1].foo.findOne().x, 1);
+
+
+ /************* Part 2 *******************/
+
+ // how about non-initial sync?
+
+ for (var i=0; i<100; i++) {
+ master.foo.insert({_id : i, "foo" : "bar"});
+ }
+ master.runCommand({getlasterror:1,w:2});
+
+ assert.eq(master.foo.findOne({_id : 99}).foo, "bar");
+ assert.eq(slave[0].foo.findOne({_id : 99}).foo, "bar");
+ assert.eq(slave[1].foo.findOne({_id : 99}), null);
+
+ sleep(35000);
+
+ assert.eq(slave[1].foo.findOne({_id : 99}).foo, "bar");
+
+ /************* Part 3 *******************/
+
+ // how about if we add a new server? will it sync correctly?
+
+ var conn = startMongodTest( 31007 , name+"-part3" , 0 , {useHostname : true, replSet : name} );
+
+ config = master.getSisterDB("local").system.replset.findOne();
+ printjson(config);
+ config.version++;
+ config.members.push({_id : 3, host : host+":31007",priority:0, slaveDelay:10});
+
+ var admin = master.getSisterDB("admin");
+ try {
+ var ok = admin.runCommand({replSetReconfig : config});
+ assert.eq(ok.ok,1);
+ }
+ catch(e) {
+ print(e);
+ }
+
+ master = replTest.getMaster().getDB(name);
+
+ waitForAllMembers(master);
+
+ sleep(15000);
+
+ // it should be all caught up now
+
+ master.foo.insert({_id : 123, "x" : "foo"});
+ master.runCommand({getlasterror:1,w:2});
+
+ conn.setSlaveOk();
+ assert.eq(conn.getDB(name).foo.findOne({_id:123}), null);
+
+ sleep(15000);
+
+ assert.eq(conn.getDB(name).foo.findOne({_id:123}).x, "foo");
+
+ replTest.stopSet();
+}
+
+doTest(15);
diff --git a/jstests/replsets/sync1.js b/jstests/replsets/sync1.js
index e60d128..af16044 100644
--- a/jstests/replsets/sync1.js
+++ b/jstests/replsets/sync1.js
@@ -1,5 +1,7 @@
// test rollback of replica sets
+load("jstests/replsets/rslib.js");
+
var debugging=0;
w = 0;
@@ -50,7 +52,7 @@ doTest = function (signal) {
dbs[0].bar.ensureIndex({ w: 1 });
var ok = false;
- var inserts = 100000;
+ var inserts = 10000;
print("\nsync1.js ********************************************************************** part 5");
@@ -62,7 +64,7 @@ doTest = function (signal) {
do {
sleep(1000);
status = dbs[0].getSisterDB("admin").runCommand({ replSetGetStatus: 1 });
- } while (status.members[1].state != 2 && status.members[2].state != 2);
+ } while (status.members[1].state != 2 || status.members[2].state != 2);
print("\nsync1.js ********************************************************************** part 6");
dbs[0].getSisterDB("admin").runCommand({ replSetTest: 1, blind: true });
@@ -125,12 +127,14 @@ doTest = function (signal) {
try {
printjson(dbs[1].isMaster());
printjson(dbs[1].bar.count());
+ printjson(dbs[1].adminCommand({replSetGetStatus : 1}));
}
catch (e) { print(e); }
print("dbs[2]:");
try {
printjson(dbs[2].isMaster());
printjson(dbs[2].bar.count());
+ printjson(dbs[2].adminCommand({replSetGetStatus : 1}));
}
catch (e) { print(e); }
assert(false, "sync1.js too many exceptions, failing");
@@ -161,10 +165,22 @@ doTest = function (signal) {
print("\nsync1.js ********************************************************************** part 10");
// now, let's see if rollback works
- var result = dbs[0].getSisterDB("admin").runCommand({ replSetTest: 1, blind: false });
+ wait(function() {
+ try {
+ dbs[0].adminCommand({ replSetTest: 1, blind: false });
+ }
+ catch(e) {
+ print(e);
+ }
+ reconnect(dbs[0]);
+ reconnect(dbs[1]);
+
+ var status = dbs[1].adminCommand({replSetGetStatus:1});
+ return status.members[0].health == 1;
+ });
+
+
dbs[0].getMongo().setSlaveOk();
-
- printjson(result);
sleep(5000);
// now this should resync
@@ -192,6 +208,10 @@ doTest = function (signal) {
count++;
if (count == 100) {
+ printjson(dbs[0].isMaster());
+ printjson(dbs[0].adminCommand({replSetGetStatus:1}));
+ printjson(dbs[1].isMaster());
+ printjson(dbs[1].adminCommand({replSetGetStatus:1}));
pause("FAIL part 11");
assert(false, "replsets/\nsync1.js fails timing out");
replTest.stopSet(signal);
diff --git a/jstests/replsets/sync_passive.js b/jstests/replsets/sync_passive.js
new file mode 100644
index 0000000..d3e8ef4
--- /dev/null
+++ b/jstests/replsets/sync_passive.js
@@ -0,0 +1,89 @@
+/**
+ * Test syncing from non-primaries.
+ *
+ * Start a set.
+ * Inital sync.
+ * Kill member 1.
+ * Add some data.
+ * Kill member 0.
+ * Restart member 1.
+ * Check that it syncs.
+ * Add some data.
+ * Kill member 1.
+ * Restart member 0.
+ * Check that it syncs.
+ */
+
+load("jstests/replsets/rslib.js");
+
+var name = "sync_passive";
+var host = getHostName();
+
+var replTest = new ReplSetTest( {name: name, nodes: 3} );
+
+var nodes = replTest.startSet();
+
+/* set slaveDelay to 30 seconds */
+var config = replTest.getReplSetConfig();
+config.members[2].priority = 0;
+
+replTest.initiate(config);
+
+var master = replTest.getMaster().getDB("test");
+var server0 = master;
+var server1 = replTest.liveNodes.slaves[0];
+
+print("Initial sync");
+for (var i=0;i<100;i++) {
+ master.foo.insert({x:i});
+}
+replTest.awaitReplication();
+
+
+print("stop #1");
+replTest.stop(1);
+
+
+print("add some data");
+for (var i=0;i<1000;i++) {
+ master.bar.insert({x:i});
+}
+replTest.awaitReplication();
+
+
+print("stop #0");
+replTest.stop(0);
+
+
+print("restart #1");
+replTest.restart(1);
+
+
+print("check sync");
+replTest.awaitReplication();
+
+
+print("add data");
+reconnect(server1);
+master = replTest.getMaster().getDB("test");
+for (var i=0;i<1000;i++) {
+ master.bar.insert({x:i});
+}
+replTest.awaitReplication();
+
+
+print("kill #1");
+replTest.stop(1);
+
+
+print("restart #0");
+replTest.restart(0);
+reconnect(server0);
+
+
+print("wait for sync");
+replTest.awaitReplication();
+
+
+print("bring #1 back up, make sure everything's okay");
+replTest.restart(1);
diff --git a/jstests/replsets/sync_passive2.js b/jstests/replsets/sync_passive2.js
new file mode 100644
index 0000000..230d71c
--- /dev/null
+++ b/jstests/replsets/sync_passive2.js
@@ -0,0 +1,120 @@
+/**
+ * Test syncing from non-primaries.
+ */
+
+load("jstests/replsets/rslib.js");
+
+var name = "sync_passive2";
+var host = getHostName();
+
+var replTest = new ReplSetTest( {name: name, nodes: 5} );
+var nodes = replTest.startSet();
+
+// 0: master
+// 1: arbiter
+// 2: slave a
+// 3: slave b
+// 4: slave c
+var config = replTest.getReplSetConfig();
+config.members[1].arbiterOnly = true;
+for (i=2; i<config.members.length; i++) {
+ config.members[i].priority = 0;
+}
+replTest.initiate(config);
+
+var master = replTest.getMaster().getDB("test");
+
+print("Initial sync");
+for (var i=0;i<10000;i++) {
+ master.foo.insert({x:i, foo:"bar", msg : "all the talk on the market", date : [new Date(), new Date(), new Date()]});
+}
+replTest.awaitReplication();
+
+
+print("stop c");
+replTest.stop(4);
+
+
+print("add data");
+for (var i=0;i<10000;i++) {
+ master.foo.insert({x:i, foo:"bar", msg : "all the talk on the market", date : [new Date(), new Date(), new Date()]});
+}
+replTest.awaitReplication();
+
+
+print("stop b");
+replTest.stop(3);
+
+
+print("add data");
+for (var i=0;i<10000;i++) {
+ master.foo.insert({x:i, foo:"bar", msg : "all the talk on the market", date : [new Date(), new Date(), new Date()]});
+}
+replTest.awaitReplication();
+
+
+print("kill master");
+replTest.stop(0);
+replTest.stop(2);
+
+
+// now we have just the arbiter up
+
+print("restart c");
+replTest.restart(4);
+print("restart b");
+replTest.restart(3);
+
+
+print("wait for sync");
+wait(function() {
+ var status = replTest.liveNodes.slaves[0].getDB("admin").runCommand({replSetGetStatus:1});
+ occasionally(function() {
+ printjson(status);
+ print("1: " + status.members +" 2: "+(status.members[3].state == 2)+" 3: "+ (status.members[4].state == 2)
+ + " 4: "+friendlyEqual(status.members[3].optime, status.members[4].optime));
+ });
+
+ return status.members &&
+ status.members[3].state == 2 &&
+ status.members[4].state == 2 &&
+ friendlyEqual(status.members[3].optime, status.members[4].optime);
+ });
+
+
+print("restart a");
+replTest.restart(2);
+print("wait for sync2");
+wait(function() {
+ var status = replTest.liveNodes.slaves[0].getDB("admin").runCommand({replSetGetStatus:1});
+ occasionally(function() {
+ printjson(status);
+ print("1: " + status.members +" 2a: "+(status.members[3].state == 2)+" 2: "+
+ (status.members[3].state == 2)+" 3: "+ (status.members[4].state == 2)
+ + " 4: "+friendlyEqual(status.members[3].optime, status.members[4].optime));
+ });
+
+ return status.members &&
+ status.members[2].state == 2 &&
+ status.members[3].state == 2 &&
+ status.members[4].state == 2 &&
+ friendlyEqual(status.members[3].optime, status.members[4].optime) &&
+ friendlyEqual(status.members[2].optime, status.members[4].optime);
+ });
+
+print("bring master back up, make sure everything's okay");
+replTest.restart(0);
+
+print("wait for sync");
+wait(function() {
+ var status = replTest.liveNodes.slaves[0].getDB("admin").runCommand({replSetGetStatus:1});
+ occasionally(function() {
+ printjson(status);
+ });
+ return status.members &&
+ status.members[2].state == 2 &&
+ status.members[3].state == 2 &&
+ status.members[4].state == 2 &&
+ friendlyEqual(status.members[3].optime, status.members[4].optime) &&
+ friendlyEqual(status.members[2].optime, status.members[4].optime);
+ });
diff --git a/jstests/replsets/toostale.js b/jstests/replsets/toostale.js
new file mode 100644
index 0000000..0b8da0d
--- /dev/null
+++ b/jstests/replsets/toostale.js
@@ -0,0 +1,121 @@
+
+// this tests that:
+// * stale members get into state 3 (recovering)
+// * they stay in state 3 after restarting
+// * they can recover and go into state 2 if someone less up-to-date becomes primary
+
+/*
+ * 1: initial insert
+ * 2: initial sync
+ * 3: blind s2
+ * 4: overflow oplog
+ * 5: unblind s2
+ * 6: check s2.state == 3
+ * 7: restart s2
+ * 8: check s2.state == 3
+ */
+
+
+var w = 0;
+var wait = function(f) {
+ w++;
+ var n = 0;
+ while (!f()) {
+ if( n % 4 == 0 )
+ print("toostale.js waiting " + w);
+ if (++n == 4) {
+ print("" + f);
+ }
+ assert(n < 200, 'tried 200 times, giving up');
+ sleep(1000);
+ }
+}
+
+var reconnect = function(a) {
+ wait(function() {
+ try {
+ a.bar.stats();
+ return true;
+ } catch(e) {
+ print(e);
+ return false;
+ }
+ });
+};
+
+
+var name = "toostale"
+var replTest = new ReplSetTest( {name: name, nodes: 3});
+
+var nodes = replTest.startSet();
+replTest.initiate();
+var master = replTest.getMaster();
+var mdb = master.getDB("foo");
+
+
+print("1: initial insert");
+mdb.foo.save({a: 1000});
+
+
+print("2: initial sync");
+replTest.awaitReplication();
+
+print("3: blind s2");
+replTest.stop(2);
+print("waiting until the master knows the slave is blind");
+assert.soon(function() { return master.getDB("admin").runCommand({replSetGetStatus:1}).members[2].health == 0 });
+print("okay");
+
+print("4: overflow oplog");
+reconnect(master.getDB("local"));
+var count = master.getDB("local").oplog.rs.count();
+var prevCount = -1;
+while (count != prevCount) {
+ print("inserting 10000");
+ for (var i = 0; i < 10000; i++) {
+ mdb.bar.insert({x:i, date : new Date(), str : "safkaldmfaksndfkjansfdjanfjkafa"});
+ }
+ prevCount = count;
+ replTest.awaitReplication();
+ count = master.getDB("local").oplog.rs.count();
+ print("count: "+count+" prev: "+prevCount);
+}
+
+
+print("5: unblind s2");
+replTest.restart(2);
+print("waiting until the master knows the slave is not blind");
+assert.soon(function() { return master.getDB("admin").runCommand({replSetGetStatus:1}).members[2].health != 0 });
+print("okay");
+
+
+print("6: check s2.state == 3");
+var goStale = function() {
+ wait(function() {
+ var status = master.getDB("admin").runCommand({replSetGetStatus:1});
+ printjson(status);
+ return status.members[2].state == 3;
+ });
+};
+goStale();
+
+
+print("7: restart s2");
+replTest.stop(2);
+replTest.restart(2);
+
+
+print("8: check s2.state == 3");
+status = master.getDB("admin").runCommand({replSetGetStatus:1});
+while (status.state == 0) {
+ print("state is 0: ");
+ printjson(status);
+ sleep(1000);
+ status = master.getDB("admin").runCommand({replSetGetStatus:1});
+}
+
+printjson(status);
+assert.eq(status.members[2].state, 3, 'recovering');
+
+
+replTest.stopSet(15);
diff --git a/jstests/replsets/two_initsync.js b/jstests/replsets/two_initsync.js
index 6ae8475..7d1442d 100755
--- a/jstests/replsets/two_initsync.js
+++ b/jstests/replsets/two_initsync.js
@@ -32,6 +32,7 @@ function wait(f) {
if (++n == 4) {
print("" + f);
}
+ assert(n < 200, 'tried 200 times, giving up');
sleep(1000);
}
}
diff --git a/jstests/replsets/twosets.js b/jstests/replsets/twosets.js
index 7cf367b..aae1113 100644
--- a/jstests/replsets/twosets.js
+++ b/jstests/replsets/twosets.js
@@ -5,19 +5,13 @@ doTest = function( signal ) {
var orig = new ReplSetTest( {name: 'testSet', nodes: 3} );
orig.startSet();
+ orig.initiate();
+ var master = orig.getMaster();
var interloper = new ReplSetTest( {name: 'testSet', nodes: 3, startPort : 31003} );
interloper.startSet();
-
- sleep(5000);
-
- orig.initiate();
interloper.initiate();
- sleep(5000);
-
- var master = orig.getMaster();
-
var conf = master.getDB("local").system.replset.findOne();
var nodes = interloper.nodeList();
@@ -26,8 +20,13 @@ doTest = function( signal ) {
conf.members.push({_id : id, host : host});
conf.version++;
- var result = master.getDB("admin").runCommand({replSetReconfig : conf});
-
+ try {
+ var result = master.getDB("admin").runCommand({replSetReconfig : conf});
+ }
+ catch(e) {
+ print(e);
+ }
+
// now... stuff should blow up?
sleep(10);
diff --git a/jstests/rs/rs_basic.js b/jstests/rs/rs_basic.js
index 08de689..e8d124d 100644
--- a/jstests/rs/rs_basic.js
+++ b/jstests/rs/rs_basic.js
@@ -1,51 +1,51 @@
// rs_basic.js
-load("../../jstests/rs/test_framework.js");
-
-function go() {
- assert(__nextPort == 27000, "_nextPort==27000");
-
- a = null;
- try {init
- a = new Mongo("localhost:27000");
- print("using already open mongod on port 27000 -- presume you are debugging or something. should start empty.");
- __nextPort++;
- }
- catch (e) {
- a = rs_mongod();
- }
-
+load("../../jstests/rs/test_framework.js");
+
+function go() {
+ assert(__nextPort == 27000, "_nextPort==27000");
+
+ a = null;
+ try {init
+ a = new Mongo("localhost:27000");
+ print("using already open mongod on port 27000 -- presume you are debugging or something. should start empty.");
+ __nextPort++;
+ }
+ catch (e) {
+ a = rs_mongod();
+ }
+
b = rs_mongod();
- x = a.getDB("admin");
- y = b.getDB("admin");
- memb = [];
- memb[0] = x;
- memb[1] = y;
-
- print("rs_basic.js go(): started 2 servers");
-
- cfg = { _id: 'asdf', members: [] };
- var hn = hostname();
- cfg.members[0] = { _id: 0, host: hn + ":27000" };
- cfg.members[1] = { _id: 1, host: hn + ":27001" };
-
+ x = a.getDB("admin");
+ y = b.getDB("admin");
+ memb = [];
+ memb[0] = x;
+ memb[1] = y;
+
+ print("rs_basic.js go(): started 2 servers");
+
+ cfg = { _id: 'asdf', members: [] };
+ var hn = hostname();
+ cfg.members[0] = { _id: 0, host: hn + ":27000" };
+ cfg.members[1] = { _id: 1, host: hn + ":27001" };
+
print("cfg=" + tojson(cfg));
-}
-
-function init(server) {
- var i = server;
- //i = Random.randInt(2); // a random member of the set
- var m = memb[i];
- assert(!m.ismaster(), "not ismaster");
- var res = m.runCommand({ replSetInitiate: cfg });
- return res;
-}
-
-_path = '../../db/Debug/';
-print("_path var set to " + _path);
-
-print("go() to run");
+}
+
+function init(server) {
+ var i = server;
+ //i = Random.randInt(2); // a random member of the set
+ var m = memb[i];
+ assert(!m.ismaster(), "not ismaster");
+ var res = m.runCommand({ replSetInitiate: cfg });
+ return res;
+}
+
+_path = '../../db/Debug/';
+print("_path var set to " + _path);
+
+print("go() to run");
print("init() to initiate");
@@ -108,7 +108,7 @@ r = function( key , v ){
correct = { a : 2 , b : 1 };
function checkMR( t ){
- var res = t.mapReduce( m , r );
+ var res = t.mapReduce( m , r , "xyz" );
assert.eq( correct , res.convertToSingleObject() , "checkMR: " + tojson( t ) );
}
diff --git a/jstests/set_param1.js b/jstests/set_param1.js
new file mode 100644
index 0000000..555cb52
--- /dev/null
+++ b/jstests/set_param1.js
@@ -0,0 +1,9 @@
+
+old = db.adminCommand( { "getParameter" : "*" } )
+tmp1 = db.adminCommand( { "setParameter" : 1 , "logLevel" : 5 } )
+tmp2 = db.adminCommand( { "setParameter" : 1 , "logLevel" : old.logLevel } )
+now = db.adminCommand( { "getParameter" : "*" } )
+
+assert.eq( old , now , "A" )
+assert.eq( old.logLevel , tmp1.was , "B" )
+assert.eq( 5 , tmp2.was , "C" )
diff --git a/jstests/sharding/addshard3.js b/jstests/sharding/addshard3.js
new file mode 100644
index 0000000..aa5a21e
--- /dev/null
+++ b/jstests/sharding/addshard3.js
@@ -0,0 +1,9 @@
+
+s = new ShardingTest( "add_shard3", 1 );
+
+var result = s.admin.runCommand({"addshard" : "localhost:31000"});
+
+printjson(result);
+
+assert.eq(result.ok, 0, "don't add mongos as a shard");
+
diff --git a/jstests/sharding/addshard4.js b/jstests/sharding/addshard4.js
new file mode 100644
index 0000000..be4a8b3
--- /dev/null
+++ b/jstests/sharding/addshard4.js
@@ -0,0 +1,24 @@
+// a replica set's passive nodes should be okay to add as part of a shard config
+
+s = new ShardingTest( "addshard4", 2 , 0 , 1 , {useHostname : true});
+
+r = new ReplSetTest({name : "addshard4", nodes : 3, startPort : 34000});
+r.startSet();
+
+var config = r.getReplSetConfig();
+config.members[2].priority = 0;
+
+r.initiate(config);
+
+var master = r.getMaster().master;
+
+var members = config.members.map(function(elem) { return elem.host; });
+var shardName = "addshard4/"+members.join(",");
+
+print("adding shard "+shardName);
+
+var result = s.adminCommand({"addshard" : shardName});
+
+printjson(result);
+
+
diff --git a/jstests/sharding/auto1.js b/jstests/sharding/auto1.js
index 346c43a..bdd43e9 100644
--- a/jstests/sharding/auto1.js
+++ b/jstests/sharding/auto1.js
@@ -32,15 +32,19 @@ print( "datasize: " + tojson( s.getServer( "test" ).getDB( "admin" ).runCommand(
for ( ; i<200; i++ ){
coll.save( { num : i , s : bigString } );
}
+db.getLastError();
s.printChunks()
+s.printChangeLog()
counts.push( s.config.chunks.count() );
for ( ; i<400; i++ ){
coll.save( { num : i , s : bigString } );
}
+db.getLastError();
s.printChunks();
+s.printChangeLog()
counts.push( s.config.chunks.count() );
for ( ; i<700; i++ ){
@@ -49,6 +53,7 @@ for ( ; i<700; i++ ){
db.getLastError();
s.printChunks();
+s.printChangeLog()
counts.push( s.config.chunks.count() );
assert( counts[counts.length-1] > counts[0] , "counts 1 : " + tojson( counts ) )
diff --git a/jstests/sharding/bigMapReduce.js b/jstests/sharding/bigMapReduce.js
index 1cc12f4..3cc1d66 100644
--- a/jstests/sharding/bigMapReduce.js
+++ b/jstests/sharding/bigMapReduce.js
@@ -7,11 +7,69 @@ db = s.getDB( "test" );
var str=""
for (i=0;i<4*1024;i++) { str=str+"a"; }
for (j=0; j<50; j++) for (i=0; i<512; i++){ db.foo.save({y:str})}
+db.getLastError();
+
+s.printChunks();
+s.printChangeLog();
function map() { emit('count', 1); }
function reduce(key, values) { return Array.sum(values) }
-out = db.foo.mapReduce(map, reduce)
-printjson(out) // SERVER-1400
+gotAGoodOne = false;
+
+for ( iter=0; iter<5; iter++ ){
+ try {
+ out = db.foo.mapReduce(map, reduce,"big_out")
+ gotAGoodOne = true
+ }
+ catch ( e ){
+ if ( __mrerror__ && __mrerror__.cause && __mrerror__.cause.assertionCode == 13388 ){
+ // TODO: SERVER-2396
+ sleep( 1000 );
+ continue;
+ }
+ printjson( __mrerror__ );
+ throw e;
+ }
+}
+assert( gotAGoodOne , "no good for basic" )
+
+gotAGoodOne = false;
+// test output to a different DB
+// do it multiple times so that primary shard changes
+for (iter = 0; iter < 5; iter++) {
+ outCollStr = "mr_replace_col_" + iter;
+ outDbStr = "mr_db_" + iter;
+
+ print("Testing mr replace into DB " + iter)
+
+ try {
+ res = db.foo.mapReduce( map , reduce , { out : { replace: outCollStr, db: outDbStr } } )
+ gotAGoodOne = true;
+ }
+ catch ( e ){
+ if ( __mrerror__ && __mrerror__.cause && __mrerror__.cause.assertionCode == 13388 ){
+ // TODO: SERVER-2396
+ sleep( 1000 );
+ continue;
+ }
+ printjson( __mrerror__ );
+ throw e;
+ }
+ printjson(res);
+
+ outDb = s.getDB(outDbStr);
+ outColl = outDb[outCollStr];
+
+ obj = outColl.convertToSingleObject("value");
+ assert.eq( 25600 , obj.count , "Received wrong result " + obj.count );
+
+ print("checking result field");
+ assert.eq(res.result.collection, outCollStr, "Wrong collection " + res.result.collection);
+ assert.eq(res.result.db, outDbStr, "Wrong db " + res.result.db);
+}
+
+assert( gotAGoodOne , "no good for out db" )
s.stop()
+
diff --git a/jstests/sharding/count1.js b/jstests/sharding/count1.js
index ed69d1f..cc3f712 100644
--- a/jstests/sharding/count1.js
+++ b/jstests/sharding/count1.js
@@ -27,14 +27,16 @@ db.foo.save( { _id : 6 , name : "allan" } )
assert.eq( 6 , db.foo.find().count() , "basic count" );
-s.adminCommand( { split : "test.foo" , find : { name : "joe" } } );
-s.adminCommand( { split : "test.foo" , find : { name : "joe" } } );
-s.adminCommand( { split : "test.foo" , find : { name : "joe" } } );
+s.adminCommand( { split : "test.foo" , find : { name : "joe" } } ); // [Minkey -> allan) , * [allan -> ..)
+s.adminCommand( { split : "test.foo" , find : { name : "joe" } } ); // * [allan -> sara) , [sara -> Maxkey)
+s.adminCommand( { split : "test.foo" , find : { name : "joe" } } ); // [alan -> joe) , [joe -> sara]
+
+s.printChunks()
assert.eq( 6 , db.foo.find().count() , "basic count after split " );
assert.eq( 6 , db.foo.find().sort( { name : 1 } ).count() , "basic count after split sorted " );
-s.adminCommand( { movechunk : "test.foo" , find : { name : "joe" } , to : secondary.getMongo().name } );
+s.adminCommand( { movechunk : "test.foo" , find : { name : "allan" } , to : secondary.getMongo().name } );
assert.eq( 3 , primary.foo.find().toArray().length , "primary count" );
assert.eq( 3 , secondary.foo.find().toArray().length , "secondary count" );
diff --git a/jstests/sharding/cursor1.js b/jstests/sharding/cursor1.js
index 2a30936..f6cb9e4 100644
--- a/jstests/sharding/cursor1.js
+++ b/jstests/sharding/cursor1.js
@@ -53,7 +53,7 @@ sleep( 6000 )
assert( cur.next() , "T3" )
assert( cur.next() , "T4" );
sleep( 22000 )
-assert.throws( function(){ cur.next(); } , "T5" )
+assert.throws( function(){ cur.next(); } , null , "T5" )
after = db.runCommand( { "cursorInfo" : 1 , "setTimeout" : 10000 } ) // 10 seconds
gc(); gc()
diff --git a/jstests/sharding/features1.js b/jstests/sharding/features1.js
index 05b8b8c..c22f094 100644
--- a/jstests/sharding/features1.js
+++ b/jstests/sharding/features1.js
@@ -81,10 +81,10 @@ assert.eq( 1 , db.foo3.count() , "eval pre1" );
assert.eq( 1 , db.foo2.count() , "eval pre2" );
assert.eq( 8 , db.eval( function(){ return db.foo3.findOne().a; } ), "eval 1 " );
-assert.throws( function(){ db.eval( function(){ return db.foo2.findOne().a; } ) } , "eval 2" )
+assert.throws( function(){ db.eval( function(){ return db.foo2.findOne().a; } ) } , null , "eval 2" )
assert.eq( 1 , db.eval( function(){ return db.foo3.count(); } ), "eval 3 " );
-assert.throws( function(){ db.eval( function(){ return db.foo2.count(); } ) } , "eval 4" )
+assert.throws( function(){ db.eval( function(){ return db.foo2.count(); } ) } , null , "eval 4" )
// ---- unique shard key ----
@@ -105,6 +105,14 @@ assert.eq( 2 , b.foo4.getIndexes().length , "ub2" );
assert( a.foo4.getIndexes()[1].unique , "ua3" );
assert( b.foo4.getIndexes()[1].unique , "ub3" );
+assert.eq( 2 , db.foo4.count() , "uc1" )
+db.foo4.save( { num : 7 } )
+assert.eq( 3 , db.foo4.count() , "uc2" )
+db.foo4.save( { num : 7 } )
+gle = db.getLastErrorObj();
+assert( gle.err , "uc3" )
+assert.eq( 3 , db.foo4.count() , "uc4" )
+
// --- don't let you convertToCapped ----
assert( ! db.foo4.isCapped() , "ca1" );
assert( ! a.foo4.isCapped() , "ca2" );
@@ -152,12 +160,22 @@ assert.throws( function(){ db.foo6.group( { key : { a : 1 } , initial : { count
// ---- can't shard non-empty collection without index -----
db.foo8.save( { a : 1 } );
+db.getLastError();
assert( ! s.admin.runCommand( { shardcollection : "test.foo8" , key : { a : 1 } } ).ok , "non-empty collection" );
+
+// ---- can't shard non-empty collection with null values in shard key ----
+
+db.foo9.save( { b : 1 } );
+db.getLastError();
+db.foo9.ensureIndex( { a : 1 } );
+assert( ! s.admin.runCommand( { shardcollection : "test.foo9" , key : { a : 1 } } ).ok , "entry with null value" );
+
+
// --- listDatabases ---
r = db.getMongo().getDBs()
-assert.eq( 4 , r.databases.length , "listDatabases 1 : " + tojson( r ) )
+assert.eq( 3 , r.databases.length , "listDatabases 1 : " + tojson( r ) )
assert.lt( 10000 , r.totalSize , "listDatabases 2 : " + tojson( r ) );
s.stop()
diff --git a/jstests/sharding/features2.js b/jstests/sharding/features2.js
index dfb2883..b2070ea 100644
--- a/jstests/sharding/features2.js
+++ b/jstests/sharding/features2.js
@@ -92,8 +92,10 @@ r = function( key , values ){
doMR = function( n ){
print(n);
-
- var res = db.mr.mapReduce( m , r );
+
+ // on-disk
+
+ var res = db.mr.mapReduce( m , r , "smr1_out" );
printjson( res );
assert.eq( new NumberLong(4) , res.counts.input , "MR T0 " + n );
@@ -103,11 +105,26 @@ doMR = function( n ){
var z = {};
x.find().forEach( function(a){ z[a._id] = a.value.count; } );
assert.eq( 3 , Object.keySet( z ).length , "MR T2 " + n );
- assert.eq( 2 , z.a , "MR T2 " + n );
- assert.eq( 3 , z.b , "MR T2 " + n );
- assert.eq( 3 , z.c , "MR T2 " + n );
+ assert.eq( 2 , z.a , "MR T3 " + n );
+ assert.eq( 3 , z.b , "MR T4 " + n );
+ assert.eq( 3 , z.c , "MR T5 " + n );
x.drop();
+
+ // inline
+
+ var res = db.mr.mapReduce( m , r , { out : { inline : 1 } } );
+ printjson( res );
+ assert.eq( new NumberLong(4) , res.counts.input , "MR T6 " + n );
+
+ var z = {};
+ res.find().forEach( function(a){ z[a._id] = a.value.count; } );
+ printjson( z );
+ assert.eq( 3 , Object.keySet( z ).length , "MR T7 " + n ) ;
+ assert.eq( 2 , z.a , "MR T8 " + n );
+ assert.eq( 3 , z.b , "MR T9 " + n );
+ assert.eq( 3 , z.c , "MR TA " + n );
+
}
doMR( "before" );
@@ -124,7 +141,7 @@ s.adminCommand({movechunk:'test.mr', find:{x:3}, to: s.getServer('test').name }
doMR( "after extra split" );
-cmd = { mapreduce : "mr" , map : "emit( " , reduce : "fooz + " };
+cmd = { mapreduce : "mr" , map : "emit( " , reduce : "fooz + " , out : "broken1" };
x = db.runCommand( cmd );
y = s._connections[0].getDB( "test" ).runCommand( cmd );
diff --git a/jstests/sharding/features3.js b/jstests/sharding/features3.js
index b15ccd3..b28d88e 100644
--- a/jstests/sharding/features3.js
+++ b/jstests/sharding/features3.js
@@ -1,4 +1,3 @@
-
s = new ShardingTest( "features3" , 2 , 1 , 1 );
s.adminCommand( { enablesharding : "test" } );
@@ -25,7 +24,7 @@ assert.eq( N / 2 , x.shards.shard0001.count , "count on shard0001" )
start = new Date()
print( "about to fork shell: " + Date() )
-join = startParallelShell( "db.foo.find( function(){ x = \"\"; for ( i=0; i<10000; i++ ){ x+=i; } return true; } ).itcount()" )
+join = startParallelShell( "db.foo.find( function(){ x = ''; for ( i=0; i<10000; i++ ){ x+=i; } return true; } ).itcount()" )
print( "after forking shell: " + Date() )
function getMine( printInprog ){
diff --git a/jstests/sharding/geo_near_random1.js b/jstests/sharding/geo_near_random1.js
new file mode 100644
index 0000000..6ffd4b2
--- /dev/null
+++ b/jstests/sharding/geo_near_random1.js
@@ -0,0 +1,37 @@
+// this tests all points using $near
+load("jstests/libs/geo_near_random.js");
+
+var testName = "geo_near_random1";
+var s = new ShardingTest( testName , 3 );
+
+db = s.getDB("test"); // global db
+
+var test = new GeoNearRandomTest(testName);
+
+s.adminCommand({enablesharding:'test'});
+s.adminCommand({shardcollection: ('test.' + testName), key: {_id:1} });
+
+test.insertPts(50);
+
+for (var i = (test.nPts/10); i < test.nPts; i+= (test.nPts/10)){
+ s.adminCommand({split: ('test.' + testName), middle: {_id: i} });
+ try {
+ s.adminCommand({moveChunk: ('test.' + testName), find: {_id: i-1}, to: ('shard000' + (i%3))});
+ } catch (e) {
+ // ignore this error
+ if (! e.match(/that chunk is already on that shard/)){
+ throw e;
+ }
+ }
+}
+
+printShardingSizes()
+
+var opts = {sharded: true}
+test.testPt([0,0], opts);
+test.testPt(test.mkPt(), opts);
+test.testPt(test.mkPt(), opts);
+test.testPt(test.mkPt(), opts);
+test.testPt(test.mkPt(), opts);
+
+s.stop()
diff --git a/jstests/sharding/geo_near_random2.js b/jstests/sharding/geo_near_random2.js
new file mode 100644
index 0000000..4871e1e
--- /dev/null
+++ b/jstests/sharding/geo_near_random2.js
@@ -0,0 +1,44 @@
+// this tests 1% of all points using $near and $nearSphere
+load("jstests/libs/geo_near_random.js");
+
+var testName = "geo_near_random2";
+var s = new ShardingTest( testName , 3 );
+
+db = s.getDB("test"); // global db
+
+var test = new GeoNearRandomTest(testName);
+
+s.adminCommand({enablesharding:'test'});
+s.adminCommand({shardcollection: ('test.' + testName), key: {_id:1} });
+
+test.insertPts(5000);
+
+for (var i = (test.nPts/10); i < test.nPts; i+= (test.nPts/10)){
+ s.adminCommand({split: ('test.' + testName), middle: {_id: i} });
+ try {
+ s.adminCommand({moveChunk: ('test.' + testName), find: {_id: i-1}, to: ('shard000' + (i%3))});
+ } catch (e) {
+ // ignore this error
+ if (! e.match(/that chunk is already on that shard/)){
+ throw e;
+ }
+ }
+}
+
+printShardingSizes()
+
+opts = {sphere:0, nToTest:test.nPts*0.01, sharded:true};
+test.testPt([0,0], opts);
+test.testPt(test.mkPt(), opts);
+test.testPt(test.mkPt(), opts);
+test.testPt(test.mkPt(), opts);
+test.testPt(test.mkPt(), opts);
+
+opts.sphere = 1
+test.testPt([0,0], opts);
+test.testPt(test.mkPt(0.8), opts);
+test.testPt(test.mkPt(0.8), opts);
+test.testPt(test.mkPt(0.8), opts);
+test.testPt(test.mkPt(0.8), opts);
+
+s.stop()
diff --git a/jstests/sharding/key_many.js b/jstests/sharding/key_many.js
index 1e0ba9d..3a8203f 100644
--- a/jstests/sharding/key_many.js
+++ b/jstests/sharding/key_many.js
@@ -20,7 +20,7 @@ s = new ShardingTest( "key_many" , 2 );
s.adminCommand( { enablesharding : "test" } )
db = s.getDB( "test" );
primary = s.getServer( "test" ).getDB( "test" );
-seconday = s.getOther( primary ).getDB( "test" );
+secondary = s.getOther( primary ).getDB( "test" );
function makeObjectDotted( v ){
var o = {};
@@ -97,12 +97,12 @@ for ( var i=0; i<types.length; i++ ){
s.adminCommand( { split : longName , find : makeObjectDotted( curT.values[3] ) } );
s.adminCommand( { split : longName , find : makeObjectDotted( curT.values[3] ) } );
- s.adminCommand( { movechunk : longName , find : makeObjectDotted( curT.values[3] ) , to : seconday.getMongo().name } );
+ s.adminCommand( { movechunk : longName , find : makeObjectDotted( curT.values[0] ) , to : secondary.getMongo().name } );
s.printChunks();
assert.eq( 3 , primary[shortName].find().toArray().length , curT.name + " primary count" );
- assert.eq( 3 , seconday[shortName].find().toArray().length , curT.name + " secondary count" );
+ assert.eq( 3 , secondary[shortName].find().toArray().length , curT.name + " secondary count" );
assert.eq( 6 , c.find().toArray().length , curT.name + " total count" );
assert.eq( 6 , c.find().sort( makeObjectDotted( 1 ) ).toArray().length , curT.name + " total count sorted" );
diff --git a/jstests/sharding/key_string.js b/jstests/sharding/key_string.js
index 8ee1c70..bbc5dfb 100644
--- a/jstests/sharding/key_string.js
+++ b/jstests/sharding/key_string.js
@@ -20,11 +20,11 @@ db.foo.save( { name : "allan" } )
assert.eq( 6 , db.foo.find().count() , "basic count" );
-s.adminCommand( { split : "test.foo" , find : { name : "joe" } } );
-s.adminCommand( { split : "test.foo" , find : { name : "joe" } } );
-s.adminCommand( { split : "test.foo" , find : { name : "joe" } } );
+s.adminCommand( { split : "test.foo" , find : { name : "joe" } } ); // [Minkey -> allan) , * [allan -> ..)
+s.adminCommand( { split : "test.foo" , find : { name : "joe" } } ); // * [allan -> sara) , [sara -> Maxkey)
+s.adminCommand( { split : "test.foo" , find : { name : "joe" } } ); // [allan -> joe) , [joe -> sara]
-s.adminCommand( { movechunk : "test.foo" , find : { name : "joe" } , to : seconday.getMongo().name } );
+s.adminCommand( { movechunk : "test.foo" , find : { name : "allan" } , to : seconday.getMongo().name } );
s.printChunks();
@@ -39,6 +39,11 @@ assert.eq( 6 , db.foo.find().sort( { name : 1 } ).count() , "total count with co
assert.eq( "allan,bob,eliot,joe,mark,sara" , db.foo.find().sort( { name : 1 } ).toArray().map( function(z){ return z.name; } ) , "sort 1" );
assert.eq( "sara,mark,joe,eliot,bob,allan" , db.foo.find().sort( { name : -1 } ).toArray().map( function(z){ return z.name; } ) , "sort 2" );
+// make sure we can't force a split on an extreme key
+// [allan->joe)
+assert.throws( function(){ s.adminCommand( { split : "test.foo" , middle : { name : "allan" } } ) } );
+assert.throws( function(){ s.adminCommand( { split : "test.foo" , middle : { name : "joe" } } ) } );
+
s.stop();
diff --git a/jstests/sharding/limit_push.js b/jstests/sharding/limit_push.js
new file mode 100644
index 0000000..75ad271
--- /dev/null
+++ b/jstests/sharding/limit_push.js
@@ -0,0 +1,47 @@
+// This test is to ensure that limit() clauses are pushed down to the shards and evaluated
+// See: http://jira.mongodb.org/browse/SERVER-1896
+
+s = new ShardingTest( "limit_push", 2, 1, 1 );
+
+db = s.getDB( "test" );
+
+// Create some data
+for (i=0; i < 100; i++) { db.limit_push.insert({ _id : i, x: i}); }
+db.limit_push.ensureIndex( { x : 1 } );
+assert.eq( 100 , db.limit_push.find().length() , "Incorrect number of documents" );
+
+// Shard the collection
+s.adminCommand( { enablesharding : "test" } );
+s.adminCommand( { shardcollection : "test.limit_push" , key : { x : 1 } } );
+
+// Now split and move the data between the shards
+s.adminCommand( { split : "test.limit_push", middle : { x : 50 }} );
+s.adminCommand( { moveChunk: "test.limit_push", find : { x : 51}, to : "shard0000" })
+
+// Check that the chunks have split correctly
+assert.eq( 2 , s.config.chunks.count() , "wrong number of chunks");
+
+// The query is asking for the maximum value below a given value
+// db.limit_push.find( { x : { $lt : 60} } ).sort( { x:-1} ).limit(1)
+q = { x : { $lt : 60} };
+
+// Make sure the basic queries are correct
+assert.eq( 60 , db.limit_push.find( q ).count() , "Did not find 60 documents" );
+//rs = db.limit_push.find( q ).sort( { x:-1} ).limit(1)
+//assert.eq( rs , { _id : "1" , x : 59 } , "Did not find document with value 59" );
+
+// Now make sure that the explain shows that each shard is returning a single document as indicated
+// by the "n" element for each shard
+exp = db.limit_push.find( q ).sort( { x:-1} ).limit(1).explain();
+printjson( exp )
+
+assert.eq("ParallelSort", exp.clusteredType, "Not a ParallelSort");
+
+var k = 0;
+for (var j in exp.shards) {
+ assert.eq( 1 , exp.shards[j][0].n, "'n' is not 1 from shard000" + k.toString());
+ k++
+}
+
+s.stop();
+
diff --git a/jstests/sharding/migrateBig.js b/jstests/sharding/migrateBig.js
new file mode 100644
index 0000000..f6ba18a
--- /dev/null
+++ b/jstests/sharding/migrateBig.js
@@ -0,0 +1,45 @@
+
+s = new ShardingTest( "migrateBig" , 2 , 0 , 1 , { chunksize : 1 } );
+
+s.adminCommand( { enablesharding : "test" } );
+s.adminCommand( { shardcollection : "test.foo" , key : { x : 1 } } );
+
+db = s.getDB( "test" )
+coll = db.foo
+
+big = ""
+while ( big.length < 10000 )
+ big += "eliot"
+
+for ( x=0; x<100; x++ )
+ coll.insert( { x : x , big : big } )
+
+s.adminCommand( { split : "test.foo" , middle : { x : 33 } } )
+s.adminCommand( { split : "test.foo" , middle : { x : 66 } } )
+s.adminCommand( { movechunk : "test.foo" , find : { x : 90 } , to : s.getOther( s.getServer( "test" ) ).name } )
+
+db.printShardingStatus()
+
+print( "YO : " + s.getServer( "test" ).host )
+direct = new Mongo( s.getServer( "test" ).host )
+print( "direct : " + direct )
+
+directDB = direct.getDB( "test" )
+
+for ( done=0; done<2*1024*1024; done+=big.length ){
+ directDB.foo.insert( { x : 50 + Math.random() , big : big } )
+ directDB.getLastError();
+}
+
+db.printShardingStatus()
+
+assert.throws( function(){ s.adminCommand( { movechunk : "test.foo" , find : { x : 50 } , to : s.getOther( s.getServer( "test" ) ).name } ); } , [] , "move should fail" )
+
+for ( i=0; i<20; i+= 2 )
+ s.adminCommand( { split : "test.foo" , middle : { x : i } } )
+
+db.printShardingStatus()
+
+assert.soon( function(){ var x = s.chunkDiff( "foo" , "test" ); print( "chunk diff: " + x ); return x < 2; } , "no balance happened" , 120 * 1000 , 2000 )
+
+s.stop()
diff --git a/jstests/sharding/multi_mongos1.js b/jstests/sharding/multi_mongos1.js
new file mode 100644
index 0000000..cf9ebde
--- /dev/null
+++ b/jstests/sharding/multi_mongos1.js
@@ -0,0 +1,70 @@
+// multi_mongos.js
+
+// setup sharding with two mongos, s1 and s2
+s1 = new ShardingTest( "multi_mongos1" , 2 , 1 , 2 );
+s2 = s1._mongos[1];
+
+s1.adminCommand( { enablesharding : "test" } );
+s1.adminCommand( { shardcollection : "test.foo" , key : { num : 1 } } );
+
+s1.config.databases.find().forEach( printjson )
+
+viaS1 = s1.getDB( "test" ).foo;
+viaS2 = s2.getDB( "test" ).foo;
+
+primary = s1.getServer( "test" ).getDB( "test" ).foo;
+secondary = s1.getOther( primary.name ).getDB( "test" ).foo;
+
+N = 4;
+for (i=1; i<=N; i++) {
+ viaS1.save( { num : i } );
+}
+
+// initial checks
+
+// both mongos see all elements
+assert.eq( N , viaS1.find().toArray().length , "normal A" );
+assert.eq( N , viaS2.find().toArray().length , "other A" );
+
+// all elements are in one of the shards
+assert.eq( N , primary.count() , "p1" )
+assert.eq( 0 , secondary.count() , "s1" )
+assert.eq( 1 , s1.onNumShards( "foo" ) , "on 1 shards" );
+
+//
+// STEP 1 (builds a bit of context so there should probably not be a step 2 in this same test)
+// where we try to issue a move chunk from a mongos that's stale
+// followed by a split on a valid chunk, albeit one with not the highest lastmod
+
+// split in [Minkey->1), [1->N), [N,Maxkey)
+s1.adminCommand( { split : "test.foo" , middle : { num : 1 } } );
+s1.adminCommand( { split : "test.foo" , middle : { num : N } } );
+
+// s2 is now stale w.r.t boundaries around { num: 1 }
+res = s2.getDB( "admin" ).runCommand( { movechunk : "test.foo" , find : { num : 1 } , to : s1.getOther( s1.getServer( "test" ) ).name } );
+assert.eq( 0 , res.ok , "a move with stale boundaries should not have succeeded" + tojson(res) );
+
+// s2 must have reloaded as a result of a failed move; retrying should work
+res = s2.getDB( "admin" ).runCommand( { movechunk : "test.foo" , find : { num : 1 } , to : s1.getOther( s1.getServer( "test" ) ).name } );
+assert.eq( 1 , res.ok , "mongos did not reload after a failed migrate" + tojson(res) );
+
+// s1 is not stale about the boundaries of [MinKey->1)
+// but we'll try to split a chunk whose lastmod.major was not touched by the previous move
+// in 1.6, that chunk would be with [Minkey->1) (where { num: -1 } falls)
+// after 1.6, it would be with [N->Maxkey] (where { num: N+1 } falls)
+// s.printShardingStatus()
+res = s1.getDB( "admin" ).runCommand( { split : "test.foo" , middle : { num : N+1 } } ); // replace with { num: -1 } instead in 1.6
+assert.eq( 1, res.ok , "split over accurate boundaries should have succeeded" + tojson(res) );
+
+// { num : 4 } is on primary
+// { num : 1 , 2 , 3 } are on secondary
+assert.eq( 1 , primary.find().toArray().length , "wrong count on primary" );
+assert.eq( 3 , secondary.find().toArray().length , "wrong count on secondary" );
+assert.eq( N , primary.find().itcount() + secondary.find().itcount() , "wrong total count" )
+
+assert.eq( N , viaS1.find().toArray().length , "normal B" );
+assert.eq( N , viaS2.find().toArray().length , "other B" );
+
+printjson( primary._db._adminCommand( "shardingState" ) );
+
+s1.stop(); \ No newline at end of file
diff --git a/jstests/sharding/rename.js b/jstests/sharding/rename.js
index aa6137d..fa27611 100644
--- a/jstests/sharding/rename.js
+++ b/jstests/sharding/rename.js
@@ -24,3 +24,4 @@ assert.eq(db.bar.findOne(), {_id:3}, '3.1');
assert.eq(db.bar.count(), 1, '3.2');
assert.eq(db.foo.count(), 0, '3.3');
+s.stop() \ No newline at end of file
diff --git a/jstests/sharding/shard1.js b/jstests/sharding/shard1.js
index 1783238..ae382e4 100644
--- a/jstests/sharding/shard1.js
+++ b/jstests/sharding/shard1.js
@@ -21,6 +21,7 @@ assert.eq( 3 , db.foo.find().length() , "after partitioning count failed" );
s.adminCommand( shardCommand );
cconfig = s.config.collections.findOne( { _id : "test.foo" } );
+assert( cconfig , "why no collection entry for test.foo" )
delete cconfig.lastmod
delete cconfig.dropped
assert.eq( cconfig , { _id : "test.foo" , key : { num : 1 } , unique : false } , "Sharded content" );
diff --git a/jstests/sharding/shard3.js b/jstests/sharding/shard3.js
index e57dc1e..7132563 100644
--- a/jstests/sharding/shard3.js
+++ b/jstests/sharding/shard3.js
@@ -41,9 +41,10 @@ printjson( primary._db._adminCommand( "shardingState" ) );
// --- filtering ---
-function doCounts( name , total ){
+function doCounts( name , total , onlyItCounts ){
total = total || ( primary.count() + secondary.count() );
- assert.eq( total , a.count() , name + " count" );
+ if ( ! onlyItCounts )
+ assert.eq( total , a.count() , name + " count" );
assert.eq( total , a.find().sort( { n : 1 } ).itcount() , name + " itcount - sort n" );
assert.eq( total , a.find().itcount() , name + " itcount" );
assert.eq( total , a.find().sort( { _id : 1 } ).itcount() , name + " itcount - sort _id" );
@@ -51,8 +52,12 @@ function doCounts( name , total ){
}
var total = doCounts( "before wrong save" )
-//secondary.save( { num : -3 } );
-//doCounts( "after wrong save" , total )
+secondary.save( { num : -3 } );
+doCounts( "after wrong save" , total , true )
+e = a.find().explain();
+assert.eq( 3 , e.n , "ex1" )
+assert.eq( 4 , e.nscanned , "ex2" )
+assert.eq( 1 , e.nChunkSkips , "ex3" )
// --- move all to 1 ---
print( "MOVE ALL TO 1" );
@@ -89,27 +94,18 @@ s.printCollectionInfo( "test.foo" , "after counts" );
assert.eq( 0 , primary.count() , "p count after drop" )
assert.eq( 0 , secondary.count() , "s count after drop" )
+// NOTE
+// the following bypasses the sharding layer and writes straight to the servers
+// this is not supported at all but we'd like to leave this backdoor for now
primary.save( { num : 1 } );
secondary.save( { num : 4 } );
-
assert.eq( 1 , primary.count() , "p count after drop and save" )
assert.eq( 1 , secondary.count() , "s count after drop and save " )
+print("*** makes sure that sharded access respects the drop command" );
-print("*** makes sure that sharding knows where things live" );
-
-assert.eq( 1 , a.count() , "a count after drop and save" )
-s.printCollectionInfo( "test.foo" , "after a count" );
-assert.eq( 1 , b.count() , "b count after drop and save" )
-s.printCollectionInfo( "test.foo" , "after b count" );
-
-assert( a.findOne( { num : 1 } ) , "a drop1" );
-assert.isnull( a.findOne( { num : 4 } ) , "a drop1" );
-
-s.printCollectionInfo( "test.foo" , "after a findOne tests" );
-
-assert( b.findOne( { num : 1 } ) , "b drop1" );
-assert.isnull( b.findOne( { num : 4 } ) , "b drop1" );
+assert.isnull( a.findOne() , "lookup via mongos 'a' accessed dropped data" );
+assert.isnull( b.findOne() , "lookup via mongos 'b' accessed dropped data" );
s.printCollectionInfo( "test.foo" , "after b findOne tests" );
@@ -130,6 +126,8 @@ s.printCollectionInfo( "test.foo" , "after dropDatabase setup3" );
print( "*** ready to call dropDatabase" )
res = s.getDB( "test" ).dropDatabase();
assert.eq( 1 , res.ok , "dropDatabase failed : " + tojson( res ) );
+// Waiting for SERVER-2253
+// assert.eq( 0 , s.config.databases.count( { _id: "test" } ) , "database 'test' was dropped but still appears in configDB" );
s.printShardingStatus();
s.printCollectionInfo( "test.foo" , "after dropDatabase call 1" );
diff --git a/jstests/sharding/shard_insert_getlasterror_w2.js b/jstests/sharding/shard_insert_getlasterror_w2.js
new file mode 100644
index 0000000..c722f21
--- /dev/null
+++ b/jstests/sharding/shard_insert_getlasterror_w2.js
@@ -0,0 +1,89 @@
+// replica set as solo shard
+// getLastError(2) fails on about every 170 inserts on my Macbook laptop -Tony
+// TODO: Add assertion code that catches hang
+
+load('jstests/libs/grid.js')
+
+function go() {
+
+ var N = 2000
+
+ // ~1KB string
+ var Text = ''
+ for (var i = 0; i < 40; i++)
+ Text += 'abcdefghijklmnopqrstuvwxyz'
+
+ // Create replica set with 3 servers
+ var repset1 = new ReplicaSet('repset1', 3) .begin()
+
+ // Add data to it
+ var conn1a = repset1.getMaster()
+ var db1a = conn1a.getDB('test')
+ for (var i = 0; i < N; i++) {
+ db1a['foo'].insert({x: i, text: Text})
+ db1a.getLastError(2) // wait to be copied to at least one secondary
+ }
+
+ // Create 3 sharding config servers
+ var configsetSpec = new ConfigSet(3)
+ var configsetConns = configsetSpec.begin()
+
+ // Create sharding router (mongos)
+ var routerSpec = new Router(configsetSpec)
+ var routerConn = routerSpec.begin()
+ var dba = routerConn.getDB('admin')
+ var db = routerConn.getDB('test')
+
+ // Add repset1 as only shard
+ addShard (routerConn, repset1.getURL())
+
+ // Enable sharding on test db and its collection foo
+ enableSharding (routerConn, 'test')
+ db['foo'].ensureIndex({x: 1})
+ shardCollection (routerConn, 'test', 'foo', {x: 1})
+
+ sleep(30000)
+ printjson (db['foo'].stats())
+ dba.printShardingStatus()
+ printjson (db['foo'].count())
+
+ // Test case where GLE should return an error
+ db.foo.insert({_id:'a', x:1});
+ db.foo.insert({_id:'a', x:1});
+ var x = db.getLastErrorObj(2, 30000)
+ assert.neq(x.err, null, tojson(x));
+
+ // Add more data
+ for (var i = N; i < 2*N; i++) {
+ db['foo'].insert({x: i, text: Text})
+ var x = db.getLastErrorObj(2, 30000) // wait to be copied to at least one secondary
+ if (i % 30 == 0) print(i)
+ if (i % 100 == 0 || x.err != null) printjson(x);
+ assert.eq(x.err, null, tojson(x));
+ }
+
+ // take down the slave and make sure it fails over
+ repset1.stop(1);
+ repset1.stop(2);
+ db.getMongo().setSlaveOk();
+ print("trying some queries");
+ assert.soon(function() { try {
+ db.foo.find().next();
+ }
+ catch(e) {
+ print(e);
+ return false;
+ }
+ return true;
+ });
+
+ // Done
+ routerSpec.end()
+ configsetSpec.end()
+ repset1.stopSet()
+
+ print('shard_insert_getlasterror_w2.js SUCCESS')
+}
+
+//Uncomment below to execute
+go()
diff --git a/jstests/sharding/sort1.js b/jstests/sharding/sort1.js
index 0edb7a7..e2b287e 100644
--- a/jstests/sharding/sort1.js
+++ b/jstests/sharding/sort1.js
@@ -2,7 +2,7 @@
s = new ShardingTest( "sort1" , 2 , 0 , 2 )
s.adminCommand( { enablesharding : "test" } );
-s.adminCommand( { shardcollection : "test.data" , key : { num : 1 } } );
+s.adminCommand( { shardcollection : "test.data" , key : { 'sub.num' : 1 } } );
db = s.getDB( "test" );
@@ -11,16 +11,16 @@ N = 100
forward = []
backward = []
for ( i=0; i<N; i++ ){
- db.data.insert( { _id : i , num : i , x : N - i } )
+ db.data.insert( { _id : i , sub: {num : i , x : N - i }} )
forward.push( i )
backward.push( ( N - 1 ) - i )
}
db.getLastError();
-s.adminCommand( { split : "test.data" , middle : { num : 33 } } )
-s.adminCommand( { split : "test.data" , middle : { num : 66 } } )
+s.adminCommand( { split : "test.data" , middle : { 'sub.num' : 33 } } )
+s.adminCommand( { split : "test.data" , middle : { 'sub.num' : 66 } } )
-s.adminCommand( { movechunk : "test.data" , find : { num : 50 } , to : s.getOther( s.getServer( "test" ) ).name } );
+s.adminCommand( { movechunk : "test.data" , find : { 'sub.num' : 50 } , to : s.getOther( s.getServer( "test" ) ).name } );
assert.eq( 3 , s.config.chunks.find().itcount() , "A1" );
@@ -28,31 +28,31 @@ temp = s.config.chunks.find().sort( { min : 1 } ).toArray();
assert.eq( temp[0].shard , temp[2].shard , "A2" );
assert.neq( temp[0].shard , temp[1].shard , "A3" );
-temp = db.data.find().sort( { num : 1 } ).toArray();
+temp = db.data.find().sort( { 'sub.num' : 1 } ).toArray();
assert.eq( N , temp.length , "B1" );
for ( i=0; i<100; i++ ){
- assert.eq( i , temp[i].num , "B2" )
+ assert.eq( i , temp[i].sub.num , "B2" )
}
-db.data.find().sort( { num : 1 } ).toArray();
-s.getServer("test").getDB( "test" ).data.find().sort( { num : 1 } ).toArray();
+db.data.find().sort( { 'sub.num' : 1 } ).toArray();
+s.getServer("test").getDB( "test" ).data.find().sort( { 'sub.num' : 1 } ).toArray();
-a = Date.timeFunc( function(){ z = db.data.find().sort( { num : 1 } ).toArray(); } , 200 );
+a = Date.timeFunc( function(){ z = db.data.find().sort( { 'sub.num' : 1 } ).toArray(); } , 200 );
assert.eq( 100 , z.length , "C1" )
-b = 1.5 * Date.timeFunc( function(){ z = s.getServer("test").getDB( "test" ).data.find().sort( { num : 1 } ).toArray(); } , 200 );
+b = 1.5 * Date.timeFunc( function(){ z = s.getServer("test").getDB( "test" ).data.find().sort( { 'sub.num' : 1 } ).toArray(); } , 200 );
assert.eq( 67 , z.length , "C2" )
print( "a: " + a + " b:" + b + " mongos slow down: " + Math.ceil( 100 * ( ( a - b ) / b ) ) + "%" )
// -- secondary index sorting
-function getSorted( by , want , dir , proj ){
+function getSorted( by , dir , proj ){
var s = {}
s[by] = dir || 1;
printjson( s )
var cur = db.data.find( {} , proj || {} ).sort( s )
- return terse( cur.map( function(z){ return z[want]; } ) );
+ return terse( cur.map( function(z){ return z.sub.num; } ) );
}
function terse( a ){
@@ -68,14 +68,22 @@ function terse( a ){
forward = terse(forward);
backward = terse(backward);
-assert.eq( forward , getSorted( "num" , "num" , 1 ) , "D1" )
-assert.eq( backward , getSorted( "num" , "num" , -1 ) , "D2" )
+assert.eq( forward , getSorted( "sub.num" , 1 ) , "D1" )
+assert.eq( backward , getSorted( "sub.num" , -1 ) , "D2" )
-assert.eq( backward , getSorted( "x" , "num" , 1 ) , "D3" )
-assert.eq( forward , getSorted( "x" , "num" , -1 ) , "D4" )
+assert.eq( backward , getSorted( "sub.x" , 1 ) , "D3" )
+assert.eq( forward , getSorted( "sub.x" , -1 ) , "D4" )
-assert.eq( backward , getSorted( "x" , "num" , 1 , { num : 1 } ) , "D5" )
-assert.eq( forward , getSorted( "x" , "num" , -1 , { num : 1 } ) , "D6" )
+assert.eq( backward , getSorted( "sub.x" , 1 , { 'sub.num' : 1 } ) , "D5" )
+assert.eq( forward , getSorted( "sub.x" , -1 , { 'sub.num' : 1 } ) , "D6" )
+assert.eq( backward , getSorted( "sub.x" , 1 , { 'sub' : 1 } ) , "D7" )
+assert.eq( forward , getSorted( "sub.x" , -1 , { 'sub' : 1 } ) , "D8" )
+
+assert.eq( backward , getSorted( "sub.x" , 1 , { '_id' : 0 } ) , "D9" )
+assert.eq( forward , getSorted( "sub.x" , -1 , { '_id' : 0 } ) , "D10" )
+
+assert.eq( backward , getSorted( "sub.x" , 1 , { '_id' : 0, 'sub.num':1 } ) , "D11" )
+assert.eq( forward , getSorted( "sub.x" , -1 , { '_id' : 0, 'sub.num':1 } ) , "D12" )
s.stop();
diff --git a/jstests/sharding/splitpick.js b/jstests/sharding/splitpick.js
deleted file mode 100644
index 3733906..0000000
--- a/jstests/sharding/splitpick.js
+++ /dev/null
@@ -1,39 +0,0 @@
-// splitpick.js
-
-/**
-* tests picking the middle to split on
-*/
-
-s = new ShardingTest( "splitpick" , 2 );
-
-db = s.getDB( "test" );
-
-s.adminCommand( { enablesharding : "test" } );
-s.adminCommand( { shardcollection : "test.foo" , key : { a : 1 } } );
-
-c = db.foo;
-
-for ( var i=1; i<20; i++ ){
- c.save( { a : i } );
-}
-c.save( { a : 99 } );
-db.getLastError();
-
-function checkSplit( f, want , num ){
- x = s.admin.runCommand( { splitvalue : "test.foo" , find : { a : f } } );
- assert.eq( want, x.middle ? x.middle.a : null , "splitvalue " + num + " " + tojson( x ) );
-}
-
-checkSplit( 1 , 1 , "1" )
-checkSplit( 3 , 1 , "2" )
-
-s.adminCommand( { split : "test.foo" , find : { a : 1 } } );
-checkSplit( 3 , 99 , "3" )
-s.adminCommand( { split : "test.foo" , find : { a : 99 } } );
-
-assert.eq( s.config.chunks.count() , 3 );
-s.printChunks();
-
-checkSplit( 50 , 10 , "4" )
-
-s.stop();
diff --git a/jstests/sharding/sync1.js b/jstests/sharding/sync1.js
index e649387..2c1a8f7 100644
--- a/jstests/sharding/sync1.js
+++ b/jstests/sharding/sync1.js
@@ -13,14 +13,23 @@ assert.eq( 2 , t.find().count() , "A4" );
test.checkHashes( "test" , "A3" );
test.tempKill();
-assert.throws( function(){ t.save( { x : 3 } ) } , "B1" )
+assert.throws( function(){ t.save( { x : 3 } ) } , null , "B1" )
assert.eq( 2 , t.find().itcount() , "B2" );
test.tempStart();
test.checkHashes( "test" , "B3" );
-
assert.eq( 2 , t.find().itcount() , "C1" );
-t.remove( { x : 1 } )
+assert.soon( function(){
+ try {
+ t.remove( { x : 1 } )
+ return true;
+ }
+ catch ( e ){
+ print( e );
+ }
+ return false;
+} )
+t.find().forEach( printjson )
assert.eq( 1 , t.find().itcount() , "C2" );
test.stop();
diff --git a/jstests/sharding/update1.js b/jstests/sharding/update1.js
index 63d4bf6..6359050 100644
--- a/jstests/sharding/update1.js
+++ b/jstests/sharding/update1.js
@@ -42,5 +42,12 @@ assert.eq(err.code, 13123, 'key error code 2');
coll.update({_id:1, key:1}, {$set: {foo:2}});
assert.isnull(db.getLastError(), 'getLastError reset');
+coll.update( { key : 17 } , { $inc : { x : 5 } } , true );
+assert.eq( 5 , coll.findOne( { key : 17 } ).x , "up1" )
+
+coll.update( { key : 18 } , { $inc : { x : 5 } } , true , true );
+assert.eq( 5 , coll.findOne( { key : 18 } ).x , "up2" )
+
+
s.stop()
diff --git a/jstests/sharding/version1.js b/jstests/sharding/version1.js
index 0516aff..a16ead3 100644
--- a/jstests/sharding/version1.js
+++ b/jstests/sharding/version1.js
@@ -2,22 +2,46 @@
s = new ShardingTest( "version1" , 1 , 2 )
+s.adminCommand( { enablesharding : "alleyinsider" } );
+s.adminCommand( { shardcollection : "alleyinsider.foo" , key : { num : 1 } } );
+
+// alleyinsider.foo is supposed to have one chunk, version 1|0, in shard000
+s.printShardingStatus();
+
a = s._connections[0].getDB( "admin" );
assert( a.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB } ).ok == 0 );
+
assert( a.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB , version : "a" } ).ok == 0 );
+
assert( a.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB , authoritative : true } ).ok == 0 );
-assert( a.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB , version : 2 } ).ok == 0 , "should have failed b/c no auth" );
-assert.commandWorked( a.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB , version : 2 , authoritative : true } ) , "should have worked" );
-assert( a.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : "a" , version : 2 } ).ok == 0 );
+assert( a.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB , version : 2 } ).ok == 0 ,
+ "should have failed b/c no auth" );
+
+assert( a.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB , version : 2 , authoritative : true } ) ,
+ "should have failed because first setShardVersion needs shard info" );
+
+assert( a.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB , version : 2 , authoritative : true ,
+ shard: "shard0000" , shardHost: "localhost:30000" } ) ,
+ "should have failed because version in config is 1|0" );
+
+assert.commandWorked( a.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB ,
+ version : new NumberLong( 4294967296 ), // 1|0
+ authoritative : true , shard: "shard0000" , shardHost: "localhost:30000" } ) ,
+ "should have worked" );
+
+assert( a.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : "a" , version : 2 } ).ok == 0 , "A" );
-assert( a.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB , version : 2 } ).ok == 1 );
-assert( a.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB , version : 1 } ).ok == 0 );
+assert( a.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB , version : 2 } ).ok == 0 , "B" );
+assert( a.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB , version : 1 } ).ok == 0 , "C" );
-assert.eq( a.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB , version : 3 } ).oldVersion.i , 2 , "oldVersion" );
+// the only way that setShardVersion passes is if the shard agrees with the version
+// the shard takes its version from config directly
+// TODO bump timestamps in config
+// assert.eq( a.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB , version : 3 } ).oldVersion.i , 2 , "oldVersion" );
-assert.eq( a.runCommand( { "getShardVersion" : "alleyinsider.foo" } ).mine.i , 3 , "my get version A" );
-assert.eq( a.runCommand( { "getShardVersion" : "alleyinsider.foo" } ).global.i , 3 , "my get version B" );
+// assert.eq( a.runCommand( { "getShardVersion" : "alleyinsider.foo" } ).mine.i , 3 , "my get version A" );
+// assert.eq( a.runCommand( { "getShardVersion" : "alleyinsider.foo" } ).global.i , 3 , "my get version B" );
s.stop();
diff --git a/jstests/sharding/version2.js b/jstests/sharding/version2.js
index 9683c92..f502fd3 100644
--- a/jstests/sharding/version2.js
+++ b/jstests/sharding/version2.js
@@ -2,6 +2,10 @@
s = new ShardingTest( "version2" , 1 , 2 )
+s.adminCommand( { enablesharding : "alleyinsider" } );
+s.adminCommand( { shardcollection : "alleyinsider.foo" , key : { num : 1 } } );
+s.adminCommand( { shardcollection : "alleyinsider.bar" , key : { num : 1 } } );
+
a = s._connections[0].getDB( "admin" );
// setup from one client
@@ -9,28 +13,41 @@ a = s._connections[0].getDB( "admin" );
assert( a.runCommand( { "getShardVersion" : "alleyinsider.foo" , configdb : s._configDB } ).mine.i == 0 );
assert( a.runCommand( { "getShardVersion" : "alleyinsider.foo" , configdb : s._configDB } ).global.i == 0 );
-assert( a.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB , version : 2 , authoritative : true } ).ok == 1 );
+assert( a.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB , authoritative : true ,
+ version : new NumberLong( 4294967296 ), // 1|0
+ shard: "shard0000" , shardHost: "localhost:30000" } ).ok == 1 );
+
+printjson( s.config.chunks.findOne() );
-assert( a.runCommand( { "getShardVersion" : "alleyinsider.foo" , configdb : s._configDB } ).mine.i == 2 );
-assert( a.runCommand( { "getShardVersion" : "alleyinsider.foo" , configdb : s._configDB } ).global.i == 2 );
+assert( a.runCommand( { "getShardVersion" : "alleyinsider.foo" , configdb : s._configDB } ).mine.t == 1000 );
+assert( a.runCommand( { "getShardVersion" : "alleyinsider.foo" , configdb : s._configDB } ).global.t == 1000 );
// from another client
a2 = connect( s._connections[0].name + "/admin" );
-assert.eq( a2.runCommand( { "getShardVersion" : "alleyinsider.foo" , configdb : s._configDB } ).global.i , 2 , "a2 global 1" );
+assert.eq( a2.runCommand( { "getShardVersion" : "alleyinsider.foo" , configdb : s._configDB } ).global.t , 1000 , "a2 global 1" );
assert.eq( a2.runCommand( { "getShardVersion" : "alleyinsider.foo" , configdb : s._configDB } ).mine.i , 0 , "a2 mine 1" );
function simpleFindOne(){
return a2.getMongo().getDB( "alleyinsider" ).foo.findOne();
}
-assert.commandWorked( a2.runCommand( { "setShardVersion" : "alleyinsider.bar" , configdb : s._configDB , version : 2 , authoritative : true } ) , "setShardVersion bar temp");
+assert.commandWorked( a2.runCommand( { "setShardVersion" : "alleyinsider.bar" , configdb : s._configDB , version : new NumberLong( 4294967296 ) , authoritative : true } ) , "setShardVersion bar temp");
+
assert.throws( simpleFindOne , [] , "should complain about not in sharded mode 1" );
-assert( a2.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB , version : 2 } ).ok == 1 , "setShardVersion a2-1");
-simpleFindOne(); // now should run ok
-assert( a2.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB , version : 3 } ).ok == 1 , "setShardVersion a2-2");
-simpleFindOne(); // newer version is ok
+
+
+// the only way that setShardVersion passes is if the shard agrees with the version
+// the shard takes its version from config directly
+// TODO bump timestamps in config
+// assert( a2.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB , version : 2 } ).ok == 1 , "setShardVersion a2-1");
+
+// simpleFindOne(); // now should run ok
+
+// assert( a2.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB , version : 3 } ).ok == 1 , "setShardVersion a2-2");
+
+// simpleFindOne(); // newer version is ok
s.stop();
diff --git a/jstests/shellkillop.js b/jstests/shellkillop.js
index 1091458..580d4c8 100644
--- a/jstests/shellkillop.js
+++ b/jstests/shellkillop.js
@@ -1,18 +1,65 @@
-baseName = "jstests_shellkillop";
-
-db[ baseName ].drop();
-
-for( i = 0; i < 100000; ++i ) {
- db[ baseName ].save( {i:1} );
-}
-assert.eq( 100000, db[ baseName ].count() );
-
-spawn = startMongoProgramNoConnect( "mongo", "--autokillop", "--port", myPort(), "--eval", "db." + baseName + ".update( {}, {$set:{i:\"abcdefghijkl\"}}, false, true ); db." + baseName + ".count();" );
-sleep( 100 );
-stopMongoProgramByPid( spawn );
-sleep( 100 );
-inprog = db.currentOp().inprog
-printjson( inprog );
-for( i in inprog ) {
- assert( inprog[ i ].ns != "test." + baseName, "still running op" );
-}
+baseName = "jstests_shellkillop";
+
+// 'retry' should be set to true in contexts where an exception should cause the test to be retried rather than to fail.
+retry = false;
+
+function testShellAutokillop() {
+
+if (_isWindows()) {
+ print("shellkillop.js not testing on windows, as functionality is missing there");
+ print("shellkillop.js see http://jira.mongodb.org/browse/SERVER-1451");
+}
+else {
+ db[baseName].drop();
+
+ print("shellkillop.js insert data");
+ for (i = 0; i < 100000; ++i) {
+ db[baseName].insert({ i: 1 });
+ }
+ assert.eq(100000, db[baseName].count());
+
+ // mongo --autokillop suppressed the ctrl-c "do you want to kill current operation" message
+ // it's just for testing purposes and thus not in the shell help
+ var evalStr = "print('SKO subtask started'); db." + baseName + ".update( {}, {$set:{i:'abcdefghijkl'}}, false, true ); db." + baseName + ".count();";
+ print("shellkillop.js evalStr:" + evalStr);
+ spawn = startMongoProgramNoConnect("mongo", "--autokillop", "--port", myPort(), "--eval", evalStr);
+
+ sleep(100);
+ retry = true;
+ assert(db[baseName].find({ i: 'abcdefghijkl' }).count() < 100000, "update ran too fast, test won't be valid");
+ retry = false;
+
+ stopMongoProgramByPid(spawn);
+
+ sleep(100);
+
+ print("count abcdefghijkl:" + db[baseName].find({ i: 'abcdefghijkl' }).count());
+
+ var inprog = db.currentOp().inprog;
+ for (i in inprog) {
+ if (inprog[i].ns == "test." + baseName)
+ throw "shellkillop.js op is still running: " + tojson( inprog[i] );
+ }
+
+ retry = true;
+ assert(db[baseName].find({ i: 'abcdefghijkl' }).count() < 100000, "update ran too fast, test was not valid");
+ retry = false;
+}
+
+}
+
+for( var nTries = 0; nTries < 10 && retry; ++nTries ) {
+ try {
+ testShellAutokillop();
+ } catch (e) {
+ if ( !retry ) {
+ throw e;
+ }
+ printjson( e );
+ print( "retrying..." );
+ }
+}
+
+assert( !retry, "retried too many times" );
+
+print("shellkillop.js SUCCESS");
diff --git a/jstests/shellspawn.js b/jstests/shellspawn.js
index 7df3c04..6b713f8 100644
--- a/jstests/shellspawn.js
+++ b/jstests/shellspawn.js
@@ -1,3 +1,5 @@
+#!/usr/bin/mongod
+
baseName = "jstests_shellspawn";
t = db.getCollection( baseName );
t.drop();
diff --git a/jstests/shellstartparallel.js b/jstests/shellstartparallel.js
new file mode 100644
index 0000000..5911029
--- /dev/null
+++ b/jstests/shellstartparallel.js
@@ -0,0 +1,17 @@
+function f() {
+ throw "intentional_throw_to_test_assert_throws";
+}
+assert.throws(f);
+
+// verify that join works
+db.sps.drop();
+join = startParallelShell("sleep(1000); db.sps.insert({x:1}); db.getLastError();");
+join();
+assert.eq(1, db.sps.count(), "join problem?");
+
+// test with a throw
+join = startParallelShell("db.sps.insert({x:1}); db.getLastError(); throw 'intentionally_uncaught';");
+join();
+assert.eq(2, db.sps.count(), "join2 problem?");
+
+print("shellstartparallel.js SUCCESS");
diff --git a/jstests/slowNightly/32bit.js b/jstests/slowNightly/32bit.js
new file mode 100755
index 0000000..d80cc78
--- /dev/null
+++ b/jstests/slowNightly/32bit.js
@@ -0,0 +1,125 @@
+// 32bit.js dm
+
+var forceSeedToBe = null;
+
+if (forceSeedToBe)
+ print("\n32bit.js WARNING FORCING A SPECIFIC SEED - TEST WILL RUN DURING DAY");
+
+function f() {
+ seed = forceSeedToBe || Math.random();
+
+ pass = 1;
+
+ var mydb = db.getSisterDB( "test_32bit" );
+ mydb.dropDatabase();
+
+ while( 1 ) {
+ if( pass >= 2 )
+ break;
+ print("32bit.js PASS #" + pass);
+ pass++;
+
+ t = mydb.colltest_32bit;
+
+ print("seed=" + seed);
+
+ t.insert({x:1});
+ t.ensureIndex({a:1});
+ t.ensureIndex({b:1}, true);
+ t.ensureIndex({x:1});
+ if( Math.random() < 0.3 )
+ t.ensureIndex({c:1});
+ t.ensureIndex({d:1});
+ t.ensureIndex({e:1});
+ t.ensureIndex({f:1});
+
+ big = 'a b';
+ big = big + big;
+ k = big;
+ big = big + big;
+ big = big + big;
+ big = big + big;
+
+ a = 0;
+ c = 'kkk';
+ var start = new Date();
+ while( 1 ) {
+ b = Math.random(seed);
+ d = c + -a;
+ f = Math.random(seed) + a;
+ a++;
+ cc = big;
+ if( Math.random(seed) < .1 )
+ cc = null;
+ t.insert({a:a,b:b,c:cc,d:d,f:f});
+ if( Math.random(seed) < 0.01 ) {
+
+ if( mydb.getLastError() ) {
+ /* presumably we have mmap error on 32 bit. try a few more manipulations attempting to break things */
+ t.insert({a:33,b:44,c:55,d:66,f:66});
+ t.insert({a:33,b:44000,c:55,d:66});
+ t.insert({a:33,b:440000,c:55});
+ t.insert({a:33,b:4400000});
+ t.update({a:20},{'$set':{c:'abc'}});
+ t.update({a:21},{'$set':{c:'aadsfbc'}});
+ t.update({a:22},{'$set':{c:'c'}});
+ t.update({a:23},{'$set':{b:cc}});
+ t.remove({a:22});
+ break;
+ }
+
+ t.remove({a:a});
+ t.remove({b:Math.random(seed)});
+ t.insert({e:1});
+ t.insert({f:'aaaaaaaaaa'});
+
+ if( Math.random() < 0.00001 ) { print("remove cc"); t.remove({c:cc}); }
+ if( Math.random() < 0.0001 ) { print("update cc"); t.update({c:cc},{'$set':{c:1}},false,true); }
+ if( Math.random() < 0.00001 ) { print("remove e"); t.remove({e:1}); }
+ }
+ if (a == 20000 ) {
+ var delta_ms = (new Date())-start;
+ // 2MM / 20000 = 100. 1000ms/sec.
+ var eta_secs = delta_ms * (100 / 1000);
+ print("32bit.js eta_secs:" + eta_secs);
+ if( eta_secs > 1000 ) {
+ print("32bit.js machine is slow, stopping early. a:" + a);
+ mydb.dropDatabase();
+ return;
+ }
+ }
+ if( a % 100000 == 0 ) {
+ print(a);
+ // on 64 bit we won't error out, so artificially stop. on 32 bit we will hit mmap limit ~1.6MM but may
+ // vary by a factor of 2x by platform
+ if( a >= 2200000 ) {
+ mydb.dropDatabase();
+ return;
+ }
+ }
+ }
+ print("count: " + t.count());
+
+ var res = t.validate();
+ if( !res.valid ) {
+ print("32bit.js FAIL validating");
+ print(res.result);
+ printjson(res);
+ //mydb.dropDatabase();
+ throw "fail validating 32bit.js";
+ }
+
+ mydb.dropDatabase();
+ }
+
+ print("32bit.js SUCCESS");
+}
+
+if (!db._adminCommand("buildInfo").debug && !db.runCommand( { serverStatus : 1 , repl : 1 } ).repl ){
+ /* this test is slow, so don't run during the day */
+ print("\n32bit.js running - this test is slow so only runs at night.");
+ f();
+}
+else {
+ print("32bit.js skipping this test - debug server build would be too slow");
+}
diff --git a/jstests/slowNightly/btreedel.js b/jstests/slowNightly/btreedel.js
new file mode 100644
index 0000000..824eb3e
--- /dev/null
+++ b/jstests/slowNightly/btreedel.js
@@ -0,0 +1,43 @@
+// btreedel.js
+
+t = db.foo;
+t.remove({});
+
+for (var i = 0; i < 1000000; i++) {
+ t.insert({ _id: i, x: 'a b' });
+}
+
+print("1 insert done count: " + t.count());
+
+var c = t.find({y:null}).sort({ _id: 1 });
+for (var j = 0; j < 400000; j++) {
+ c.next();
+ if (j % 200000 == 0)
+ printjson(c.next());
+}
+printjson(c.next());
+
+var d = t.find({ _id: { $gt: 300000} }).sort({ _id: -1 });
+d.next();
+
+print("2");
+
+t.remove({ _id: { $gt: 200000, $lt: 600000} });
+
+print("3");
+print(d.hasNext());
+
+n = 0;
+last = {};
+printjson(c.next());
+while (c.hasNext()) {
+ n++;
+ last = c.next();
+}
+
+print("4. n:" + n);
+printjson(last);
+
+assert(n > 100000);
+
+print("btreedel.js success");
diff --git a/jstests/slowNightly/capped4.js b/jstests/slowNightly/capped4.js
index 01af8f2..27d138c 100644
--- a/jstests/slowNightly/capped4.js
+++ b/jstests/slowNightly/capped4.js
@@ -31,4 +31,4 @@ assert( t.validate().valid, "G" );
db._adminCommand("closeAllDatabases");
-//assert( db.serverStatus().cursors.totalOpen == 0, "cursors open and shouldn't be");
+assert( db.serverStatus().cursors.totalOpen == 0, "cursors open and shouldn't be");
diff --git a/jstests/slowNightly/command_line_parsing.js b/jstests/slowNightly/command_line_parsing.js
new file mode 100644
index 0000000..38c7324
--- /dev/null
+++ b/jstests/slowNightly/command_line_parsing.js
@@ -0,0 +1,9 @@
+// validate command line parameter parsing
+
+port = allocatePorts( 1 )[ 0 ];
+var baseName = "jstests_slowNightly_command_line_parsing";
+
+// test notablescan
+var m = startMongod( "--port", port, "--dbpath", "/data/db/" + baseName, "--notablescan" );
+m.getDB( baseName ).getCollection( baseName ).save( {a:1} );
+assert.throws( function() { m.getDB( baseName ).getCollection( baseName ).find( {a:1} ).toArray() } );
diff --git a/jstests/slowNightly/dur_big_atomic_update.js b/jstests/slowNightly/dur_big_atomic_update.js
new file mode 100644
index 0000000..ffb0d83
--- /dev/null
+++ b/jstests/slowNightly/dur_big_atomic_update.js
@@ -0,0 +1,31 @@
+// @file dur_big_atomic_update.js
+//
+// this tests writing 1GB in an atomic update to make sure we commit periodically
+
+var path = "/data/db/dur_big_atomic_update";
+
+conn = startMongodEmpty("--port", 30001, "--dbpath", path, "--dur", "--durOptions", 8);
+d = conn.getDB("test");
+d.foo.drop();
+
+for (var i=0; i<1024; i++){
+ d.foo.insert({_id:i});
+}
+
+big_string = 'x';
+while (big_string.length < 1024*1024) {
+ big_string += big_string;
+}
+
+d.foo.update({$atomic:1}, {$set: {big_string: big_string}}, false, /*multi*/true);
+err = d.getLastErrorObj();
+
+assert(err.err == null);
+assert(err.n == 1024);
+
+// free up space
+d.dropDatabase();
+
+stopMongod(30001);
+
+print("dur big atomic update SUCCESS");
diff --git a/jstests/slowNightly/dur_passthrough.js b/jstests/slowNightly/dur_passthrough.js
new file mode 100644
index 0000000..22482e0
--- /dev/null
+++ b/jstests/slowNightly/dur_passthrough.js
@@ -0,0 +1,89 @@
+// runs the toplevel jstests with --dur
+//
+// TODO(mathias) use paranoid mode (--durOptions 8) once we are reasonably sure it will pass
+
+// DEBUG : set this variable to debug by skipping to a specific test to start with and go from there
+//var skippingTo = /null.js/;
+var skippingTo = false;
+
+conn = startMongodEmpty("--port", 30100, "--dbpath", "/data/db/dur_passthrough", "--dur", "--smallfiles");
+db = conn.getDB("test");
+
+function durPassThrough() {
+
+ var runnerStart = new Date()
+
+ var ran = {};
+
+ /** run a test. won't run more than once. logs if fails and then throws.
+ */
+ function runTest(x) {
+ function _run(x) {
+ if (/[\/\\]_/.test(x.name) ||
+ !/\.js$/.test(x.name) ||
+ /repair/.test(x.name) ||
+// /numberlong/.test(x.name) ||
+ false // placeholder so all real tests end in ||
+ ) {
+ print("dur_passthrough.js >>>> skipping " + x.name);
+ return;
+ }
+ print();
+ print("dur_passthrough.js run " + x.name);
+ print("dur_passthrough.js end " + x.name + ' ' + Date.timeFunc(function () { load(x.name); }, 1) + "ms");
+ print();
+ }
+ if (ran[x.name])
+ return;
+ ran[x.name] = true;
+ try {
+ _run(x);
+ }
+ catch (e) {
+ print("\n\n\n\ndur_passthrough.js FAIL " + x.name + "\n\n\n");
+ throw e;
+ }
+ }
+
+ var files = listFiles("jstests");
+
+ if( !skippingTo ) {
+ // run something that will almost surely pass and is fast just to make sure our framework
+ // here is really working
+ runTest({ name: 'jstests/basic1.js' });
+
+ // run "suspicious" tests early. these are tests that have ever failed in buildbot. we run them
+ // early and try to get a fail fast
+ runTest({ name: 'jstests/shellstartparallel.js' });
+ runTest({ name: 'jstests/cursora.js' });
+
+ // run the shell-oriented tests early. if the shell is broken the other tests aren't meaningful
+ runTest({ name: 'jstests/run_program1.js' });
+ runTest({ name: 'jstests/shellspawn.js' });
+ runTest({ name: 'jstests/shellkillop.js' });
+ }
+
+ files = files.sort(compareOn('name'));
+ files.forEach(
+ function (x) {
+ if (skippingTo && !skippingTo.test(x.name)) {
+ print("dur_passthrough.js temp skip " + x.name);
+ return;
+ }
+ skippingTo = false;
+
+ // to keep memory usage low on 32 bit:
+ db.adminCommand("closeAllDatabases");
+
+ runTest(x);
+ }
+ );
+
+ print("dur_passthrough.js stopMongod");
+ stopMongod(30100);
+ var runnerEnd = new Date();
+ print("dur_passthrough.js total runner time: " + ((runnerEnd.getTime() - runnerStart.getTime()) / 1000) + "secs")
+}
+
+durPassThrough();
+print("dur_passthrough.js SUCCESS");
diff --git a/jstests/slowNightly/dur_remove_old_journals.js b/jstests/slowNightly/dur_remove_old_journals.js
new file mode 100644
index 0000000..3c57c12
--- /dev/null
+++ b/jstests/slowNightly/dur_remove_old_journals.js
@@ -0,0 +1,53 @@
+// this test makes sure that old journal files are removed
+
+// tunables
+STRING_SIZE = 1024*1024;
+NUM_TO_INSERT = 2.5*1024;
+PATH = "/data/db/dur_remove_old_journals";
+SYNC_DELAY = 5; // must be a number
+
+conn = startMongodEmpty("--port", 30001, "--dbpath", PATH, "--dur", "--smallfiles", "--syncdelay", ''+SYNC_DELAY);
+db = conn.getDB("test");
+
+longString = 'x';
+while (longString.length < STRING_SIZE)
+ longString += longString;
+
+numInserted = 0;
+while (numInserted < NUM_TO_INSERT){
+ db.foo.insert({_id: numInserted++, s:longString});
+
+
+ if (numInserted % 100 == 0){
+ print("numInserted: " + numInserted);
+ db.adminCommand({fsync:1});
+ db.foo.remove();
+ db.adminCommand({fsync:1});
+ }
+}
+
+sleepSecs = SYNC_DELAY + 15 // long enough for data file flushing and journal keep time
+print("\nWaiting " + sleepSecs + " seconds...\n");
+sleep(sleepSecs*1000);
+
+
+files = listFiles(PATH + "/journal")
+printjson(files);
+
+var nfiles = 0;
+files.forEach(function (file) {
+ assert.eq('string', typeof (file.name)); // sanity checking
+ if (/prealloc/.test(file.name)) {
+ ;
+ }
+ else {
+ nfiles++;
+ assert(!(/j\._[01]/.test(file.name)), "Old journal file still exists: " + file.name);
+ }
+})
+
+assert.eq(2, nfiles); // j._2 and lsn
+
+stopMongod(30001);
+
+print("*** success ***");
diff --git a/jstests/slowNightly/geo_near_random1.js b/jstests/slowNightly/geo_near_random1.js
new file mode 100644
index 0000000..ad67bdc
--- /dev/null
+++ b/jstests/slowNightly/geo_near_random1.js
@@ -0,0 +1,13 @@
+// this tests all points using $near
+load("jstests/libs/geo_near_random.js");
+
+var test = new GeoNearRandomTest("nightly.geo_near_random1");
+
+test.insertPts(200);
+
+test.testPt([0,0]);
+test.testPt(test.mkPt());
+test.testPt(test.mkPt());
+test.testPt(test.mkPt());
+test.testPt(test.mkPt());
+
diff --git a/jstests/slowNightly/geo_near_random2.js b/jstests/slowNightly/geo_near_random2.js
new file mode 100644
index 0000000..d7dbc97
--- /dev/null
+++ b/jstests/slowNightly/geo_near_random2.js
@@ -0,0 +1,21 @@
+// this tests 1% of all points using $near and $nearSphere
+load("jstests/libs/geo_near_random.js");
+
+var test = new GeoNearRandomTest("nightly.geo_near_random2");
+
+test.insertPts(10000);
+
+opts = {sphere:0, nToTest:test.nPts*0.01};
+test.testPt([0,0], opts);
+test.testPt(test.mkPt(), opts);
+test.testPt(test.mkPt(), opts);
+test.testPt(test.mkPt(), opts);
+test.testPt(test.mkPt(), opts);
+
+opts.sphere = 1
+test.testPt([0,0], opts);
+test.testPt(test.mkPt(0.8), opts);
+test.testPt(test.mkPt(0.8), opts);
+test.testPt(test.mkPt(0.8), opts);
+test.testPt(test.mkPt(0.8), opts);
+
diff --git a/jstests/slowNightly/index_check9.js b/jstests/slowNightly/index_check9.js
new file mode 100644
index 0000000..6634d06
--- /dev/null
+++ b/jstests/slowNightly/index_check9.js
@@ -0,0 +1,118 @@
+Random.setRandomSeed();
+
+t = db.test_index_check9;
+
+function doIt() {
+
+t.drop();
+
+function sort() {
+ var sort = {};
+ for( var i = 0; i < n; ++i ) {
+ sort[ fields[ i ] ] = Random.rand() > 0.5 ? 1 : -1;
+ }
+ return sort;
+}
+
+var fields = [ 'a', 'b', 'c', 'd', 'e' ];
+n = Random.randInt( 5 ) + 1;
+var idx = sort();
+
+var chars = "abcdefghijklmnopqrstuvwxyz";
+var alphas = []
+for( var i = 0; i < n; ++i ) {
+ alphas.push( Random.rand() > 0.5 );
+}
+
+t.ensureIndex( idx );
+
+function obj() {
+ var ret = {};
+ for( var i = 0; i < n; ++i ) {
+ ret[ fields[ i ] ] = r( alphas[ i ] );
+ }
+ return ret;
+}
+
+function r( alpha ) {
+ if ( !alpha ) {
+ return Random.randInt( 10 );
+ } else {
+ var len = Random.randInt( 10 );
+ buf = "";
+ for( var i = 0; i < len; ++i ) {
+ buf += chars.charAt( Random.randInt( chars.length ) );
+ }
+ return buf;
+ }
+}
+
+function check() {
+ var v = t.validate();
+ if ( !t.valid ) {
+ printjson( t );
+ assert( t.valid );
+ }
+ var spec = {};
+ for( var i = 0; i < n; ++i ) {
+ if ( Random.rand() > 0.5 ) {
+ var bounds = [ r( alphas[ i ] ), r( alphas[ i ] ) ];
+ if ( bounds[ 0 ] > bounds[ 1 ] ) {
+ bounds.reverse();
+ }
+ var s = {};
+ if ( Random.rand() > 0.5 ) {
+ s[ "$gte" ] = bounds[ 0 ];
+ } else {
+ s[ "$gt" ] = bounds[ 0 ];
+ }
+ if ( Random.rand() > 0.5 ) {
+ s[ "$lte" ] = bounds[ 1 ];
+ } else {
+ s[ "$lt" ] = bounds[ 1 ];
+ }
+ spec[ fields[ i ] ] = s;
+ } else {
+ var vals = []
+ for( var j = 0; j < Random.randInt( 15 ); ++j ) {
+ vals.push( r( alphas[ i ] ) );
+ }
+ spec[ fields[ i ] ] = { $in: vals };
+ }
+ }
+ s = sort();
+ c1 = t.find( spec, { _id:null } ).sort( s ).hint( idx ).toArray();
+ c2 = t.find( spec ).sort( s ).explain().nscanned;
+ c3 = t.find( spec, { _id:null } ).sort( s ).hint( {$natural:1} ).toArray();
+ // assert.eq( c1, c3, "spec: " + tojson( spec ) + ", sort: " + tojson( s ) );
+ // assert.eq( c1.length, c2 );
+ assert.eq( c1, c3 );
+}
+
+for( var i = 0; i < 10000; ++i ) {
+ t.save( obj() );
+ if( Random.rand() > 0.999 ) {
+ print( i );
+ check();
+ }
+}
+
+for( var i = 0; i < 100000; ++i ) {
+ if ( Random.rand() > 0.9 ) {
+ t.save( obj() );
+ } else {
+ t.remove( obj() ); // improve
+ }
+ if( Random.rand() > 0.999 ) {
+ print( i );
+ check();
+ }
+}
+
+check();
+
+}
+
+for( var z = 0; z < 5; ++z ) {
+ doIt();
+} \ No newline at end of file
diff --git a/jstests/slowNightly/large_chunk.js b/jstests/slowNightly/large_chunk.js
new file mode 100644
index 0000000..6cf40e3
--- /dev/null
+++ b/jstests/slowNightly/large_chunk.js
@@ -0,0 +1,51 @@
+// Where we test operations dealing with large chunks
+
+// Starts a new sharding environment limiting the chunksize to 2GB.
+// Note that early splitting will start with a 1/4 of max size currently.
+s = new ShardingTest( "large_chunk" , 2 , 2 , 1 , { chunksize : 2000 } );
+
+// take the balancer out of the equation
+s.config.settings.update( { _id: "balancer" }, { $set : { stopped: true } } , true );
+s.config.settings.find().forEach( printjson )
+db = s.getDB( "test" );
+
+//
+// Step 1 - Test moving a large chunk
+//
+
+// Turn on sharding on the 'test.foo' collection and generate a large chunk
+s.adminCommand( { enablesharding : "test" } );
+s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
+
+bigString = ""
+while ( bigString.length < 10000 )
+ bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
+
+inserted = 0;
+num = 0;
+while ( inserted < ( 400 * 1024 * 1024 ) ){
+ db.foo.insert( { _id : num++ , s : bigString } );
+ inserted += bigString.length;
+}
+db.getLastError();
+assert.eq( 1 , s.config.chunks.count() , "step 1 - need one large chunk" );
+
+primary = s.getServer( "test" ).getDB( "test" );
+secondary = s.getOther( primary ).getDB( "test" );
+
+// Make sure that we don't move that chunk if it goes past what we consider the maximum chunk size
+print("Checkpoint 1a")
+max = 200 * 1024 * 1024;
+moveChunkCmd = { movechunk : "test.foo" , find : { _id : 1 } , to : secondary.getMongo().name , maxChunkSizeBytes : max };
+assert.throws( function() { s.adminCommand( moveChunkCmd ); } );
+
+// Move the chunk
+print("checkpoint 1b");
+before = s.config.chunks.find().toArray();
+s.adminCommand( { movechunk : "test.foo" , find : { _id : 1 } , to : secondary.getMongo().name } );
+after = s.config.chunks.find().toArray();
+assert.neq( before[0].shard , after[0].shard , "move chunk did not work" );
+
+s.config.changelog.find().forEach( printjson )
+
+s.stop(); \ No newline at end of file
diff --git a/jstests/slowNightly/moveprimary-replset.js b/jstests/slowNightly/moveprimary-replset.js
new file mode 100755
index 0000000..0b6a78b
--- /dev/null
+++ b/jstests/slowNightly/moveprimary-replset.js
@@ -0,0 +1,67 @@
+// Move db between replica set shards -Tony
+
+load('jstests/libs/grid.js')
+
+function go() {
+
+var N = 10000
+
+// Create replica set of one server
+var repset1 = new ReplicaSet('repset1', 1) .begin()
+var conn1a = repset1.getMaster()
+var db1a = conn1a.getDB('test')
+
+// Add data to it
+for (var i = 1; i <= N; i++) db1a['foo'].insert({x: i})
+
+// Add another server to replica set
+var conn1b = repset1.addServer()
+conn1b.setSlaveOk()
+var db1b = conn1b.getDB('test')
+
+// Check that new server received replicated data
+assert (db1b['foo'].count() == N, 'data did not replicate')
+
+// Create sharding config servers
+var configset = new ConfigSet(3)
+configset.begin()
+
+// Create sharding router (mongos)
+var router = new Router(configset)
+var routerConn = router.begin()
+var db = routerConn.getDB('test')
+
+// Add repset1 as only shard
+addShard (routerConn, repset1.getURL())
+
+// Add data via router and check it
+db['foo'].update({}, {$set: {y: 'hello'}}, false, true)
+assert (db['foo'].count({y: 'hello'}) == N,
+ 'updating and counting docs via router (mongos) failed')
+
+// Create another replica set
+var repset2 = new ReplicaSet('repset2', 2) .begin()
+var conn2a = repset2.getMaster()
+
+// Add repset2 as second shard
+addShard (routerConn, repset2.getURL())
+
+routerConn.getDB('admin').printShardingStatus()
+printjson (conn2a.getDBs())
+
+// Move test db from repset1 to repset2
+moveDB (routerConn, 'test', repset2.getURL())
+
+routerConn.getDB('admin').printShardingStatus()
+printjson (conn2a.getDBs())
+
+//Done
+router.end()
+configset.end()
+repset2.stopSet()
+repset1.stopSet()
+
+print('moveprimary-replset.js SUCCESS')
+}
+
+go()
diff --git a/jstests/slowNightly/newcollection2.js b/jstests/slowNightly/newcollection2.js
new file mode 100644
index 0000000..6bf2495
--- /dev/null
+++ b/jstests/slowNightly/newcollection2.js
@@ -0,0 +1,11 @@
+// Allocate collection forcing just a small size remainder in 2nd extent
+
+port = allocatePorts( 1 )[ 0 ]
+var baseName = "jstests_disk_newcollection2";
+var m = startMongod( "--noprealloc", "--smallfiles", "--port", port, "--dbpath", "/data/db/" + baseName );
+db = m.getDB( "test" );
+
+db.createCollection( baseName, {size:0x1FFC0000-0x10-8192} );
+var v = db[ baseName ].validate();
+printjson( v );
+assert( v.valid );
diff --git a/jstests/slowNightly/sharding_balance1.js b/jstests/slowNightly/sharding_balance1.js
index 840aaff..9379c4f 100644
--- a/jstests/slowNightly/sharding_balance1.js
+++ b/jstests/slowNightly/sharding_balance1.js
@@ -1,7 +1,7 @@
// sharding_balance1.js
-s = new ShardingTest( "slow_sharding_balance1" , 2 , 2 , 1 , { chunksize : 1 } )
+s = new ShardingTest( "slow_sharding_balance1" , 2 , 1 , 1 , { chunksize : 1 } )
s.adminCommand( { enablesharding : "test" } );
diff --git a/jstests/slowNightly/sharding_balance2.js b/jstests/slowNightly/sharding_balance2.js
index c94e256..3296ff6 100644
--- a/jstests/slowNightly/sharding_balance2.js
+++ b/jstests/slowNightly/sharding_balance2.js
@@ -1,6 +1,6 @@
// sharding_balance2.js
-s = new ShardingTest( "slow_sharding_balance2" , 2 , 2 , 1 , { chunksize : 1 , manualAddShard : true } )
+s = new ShardingTest( "slow_sharding_balance2" , 2 , 1 , 1 , { chunksize : 1 , manualAddShard : true } )
names = s.getConnNames();
for ( var i=0; i<names.length; i++ ){
diff --git a/jstests/slowNightly/sharding_balance3.js b/jstests/slowNightly/sharding_balance3.js
index faec197..b0db05f 100644
--- a/jstests/slowNightly/sharding_balance3.js
+++ b/jstests/slowNightly/sharding_balance3.js
@@ -1,6 +1,8 @@
// sharding_balance3.js
-s = new ShardingTest( "slow_sharding_balance3" , 2 , 2 , 1 , { chunksize : 1 } );
+// simple test to make sure things get balanced
+
+s = new ShardingTest( "slow_sharding_balance3" , 2 , 3 , 1 , { chunksize : 1 } );
s.adminCommand( { enablesharding : "test" } );
diff --git a/jstests/slowNightly/sharding_balance4.js b/jstests/slowNightly/sharding_balance4.js
index c54d3da..4cbbba6 100644
--- a/jstests/slowNightly/sharding_balance4.js
+++ b/jstests/slowNightly/sharding_balance4.js
@@ -1,6 +1,8 @@
// sharding_balance4.js
-s = new ShardingTest( "slow_sharding_balance4" , 2 , 2 , 1 , { chunksize : 1 } )
+// check that doing updates done during a migrate all go to the right place
+
+s = new ShardingTest( "slow_sharding_balance4" , 2 , 1 , 1 , { chunksize : 1 } )
s.adminCommand( { enablesharding : "test" } );
s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
@@ -36,9 +38,8 @@ for ( i=0; i<N*10; i++ ){
}
db.getLastError();
-s.printChunks( "test.foo" )
-
-for ( var i=0; i<10; i++ ){
+for ( var i=0; i<50; i++ ){
+ s.printChunks( "test.foo" )
if ( check( "initial:" + i , true ) )
break;
sleep( 5000 )
@@ -48,19 +49,6 @@ check( "initial at end" )
assert.lt( 20 , s.config.chunks.count() , "setup2" );
-function dist(){
- var x = {}
- s.config.chunks.find( { ns : "test.foo" } ).forEach(
- function(z){
- if ( x[z.shard] )
- x[z.shard]++
- else
- x[z.shard] = 1;
- }
- );
- return x;
-}
-
function check( msg , dontAssert ){
for ( var x in counts ){
var e = counts[x];
@@ -69,9 +57,15 @@ function check( msg , dontAssert ){
if ( z && z.x == e )
continue;
- if ( dontAssert )
+ if ( dontAssert ){
+ if ( z )
+ delete z.s;
+ print( "not asserting for key failure: " + x + " want: " + e + " got: " + tojson(z) )
return false;
+ }
+ // we will assert past this point but wait a bit to see if it is because the missing update
+ // was being held in the writeback roundtrip
sleep( 10000 );
var y = db.foo.findOne( { _id : parseInt( x ) } )
@@ -79,6 +73,8 @@ function check( msg , dontAssert ){
if ( y ){
delete y.s;
}
+
+ s.printChunks( "test.foo" )
assert( z , "couldn't find : " + x + " y:" + tojson(y) + " e: " + e + " " + msg )
assert.eq( e , z.x , "count for : " + x + " y:" + tojson(y) + " " + msg )
@@ -90,22 +86,27 @@ function check( msg , dontAssert ){
function diff(){
var myid = doUpdate( false )
var le = db.getLastErrorCmd();
+
if ( le.err )
print( "ELIOT ELIOT : " + tojson( le ) + "\t" + myid );
+ assert( le.updatedExisting , "GLE diff 1: " + tojson(le) )
+ assert.eq( 1 , le.n , "GLE diff 2: " + tojson(le) )
+
+
if ( Math.random() > .99 ){
db.getLastError()
- check(); // SERVER-1430 TODO
+ check( "random late check" ); // SERVER-1430
}
- var x = dist();
+ var x = s.chunkCounts( "foo" )
if ( Math.random() > .999 )
printjson( x )
return Math.max( x.shard0000 , x.shard0001 ) - Math.min( x.shard0000 , x.shard0001 );
}
function sum(){
- var x = dist();
+ var x = s.chunkCounts( "foo" )
return x.shard0000 + x.shard0001;
}
diff --git a/jstests/slowNightly/sharding_balance_randomorder1.js b/jstests/slowNightly/sharding_balance_randomorder1.js
new file mode 100644
index 0000000..05eabc6
--- /dev/null
+++ b/jstests/slowNightly/sharding_balance_randomorder1.js
@@ -0,0 +1,54 @@
+// sharding_balance1.js
+
+s = new ShardingTest( "sharding_balance_randomorder1" , 2 , 2 , 1 , { chunksize : 1 } )
+
+s.adminCommand( { enablesharding : "test" } );
+
+s.config.settings.find().forEach( printjson )
+
+db = s.getDB( "test" );
+
+bigString = ""
+while ( bigString.length < 10000 )
+ bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
+
+inserted = 0;
+num = 0;
+while ( inserted < ( 20 * 1024 * 1024 ) ){
+ db.foo.insert( { _id : Math.random() , s : bigString } );
+ inserted += bigString.length;
+}
+
+db.getLastError();
+s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
+assert.lt( 20 , s.config.chunks.count() , "setup2" );
+
+function diff(){
+ var x = s.chunkCounts( "foo" );
+ printjson( x )
+ return Math.max( x.shard0000 , x.shard0001 ) - Math.min( x.shard0000 , x.shard0001 );
+}
+
+function sum(){
+ var x = s.chunkCounts( "foo" );
+ return x.shard0000 + x.shard0001;
+}
+
+assert.lt( 20 , diff() , "big differential here" );
+print( diff() )
+
+assert.soon( function(){
+ var d = diff();
+ return d < 5;
+} , "balance didn't happen" , 1000 * 60 * 3 , 5000 );
+
+var chunkCount = sum();
+s.adminCommand( { removeshard: "shard0000" } );
+
+assert.soon( function(){
+ printjson(s.chunkCounts( "foo" ));
+ s.config.shards.find().forEach(function(z){printjson(z);});
+ return chunkCount == s.config.chunks.count({shard: "shard0001"});
+} , "removeshard didn't happen" , 1000 * 60 * 3 , 5000 );
+
+s.stop();
diff --git a/jstests/slowNightly/sharding_cursors1.js b/jstests/slowNightly/sharding_cursors1.js
index 307e8d7..de59b0d 100644
--- a/jstests/slowNightly/sharding_cursors1.js
+++ b/jstests/slowNightly/sharding_cursors1.js
@@ -1,4 +1,4 @@
-s = new ShardingTest( "cursors1" , 2 , 0 , 1 , { chunksize : 1 } )
+s = new ShardingTest( "sharding_cursors1" , 2 , 0 , 1 , { chunksize : 1 } )
s.adminCommand( { enablesharding : "test" } );
@@ -17,6 +17,10 @@ toInsert = ( 1 * 1000 * 1000 );
for (var i=0; i < toInsert; i++ ){
db.foo.insert( { i: i, r: Math.random(), s: bigString } );
assert.eq(db.getLastError(), null, 'no error'); //SERVER-1541
+
+ if ( i % 1000 == 999 ) {
+ print( "already inserted " + ( i + 1 ) );
+ }
}
inserted = toInsert;
diff --git a/jstests/slowNightly/sharding_multiple_collections.js b/jstests/slowNightly/sharding_multiple_collections.js
new file mode 100644
index 0000000..61d9911
--- /dev/null
+++ b/jstests/slowNightly/sharding_multiple_collections.js
@@ -0,0 +1,53 @@
+// sharding_multiple_collections.js
+
+s = new ShardingTest( "multcollections" , 2 , 1 , 1 , { chunksize : 1 } );
+
+s.adminCommand( { enablesharding : "test" } );
+
+db = s.getDB( "test" )
+
+N = 100000
+
+S = ""
+while ( S.length < 500 )
+ S += "123123312312";
+
+for ( i=0; i<N; i++ ){
+ db.foo.insert( { _id : i , s : S } )
+ db.bar.insert( { _id : i , s : S , s2 : S } )
+ db.getLastError()
+}
+
+db.printShardingStatus()
+
+function mytest( coll , i , loopNumber ){
+ x = coll.find( { _id : i } ).explain();
+ if ( x )
+ return;
+ throw "can't find " + i + " in " + coll.getName() + " on loopNumber: " + loopNumber + " explain: " + tojson( x );
+}
+
+loopNumber = 0
+while ( 1 ){
+ for ( i=0; i<N; i++ ){
+ mytest( db.foo , i , loopNumber );
+ mytest( db.bar , i , loopNumber );
+ if ( i % 1000 == 0 )
+ print( i )
+ }
+ db.printShardingStatus()
+ loopNumber++;
+
+ if ( loopNumber == 1 ){
+ s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
+ s.adminCommand( { shardcollection : "test.bar" , key : { _id : 1 } } );
+ }
+
+ assert( loopNumber < 1000 , "taking too long" );
+
+ if ( s.chunkDiff( "foo" ) < 12 && s.chunkDiff( "bar" ) < 12 )
+ break
+}
+
+s.stop()
+
diff --git a/jstests/slowNightly/run_sharding_passthrough.js b/jstests/slowNightly/sharding_passthrough.js
index fda982b..81781ca 100644
--- a/jstests/slowNightly/run_sharding_passthrough.js
+++ b/jstests/slowNightly/sharding_passthrough.js
@@ -1,4 +1,4 @@
-s = new ShardingTest( "auto1" , 2 , 1 , 1 );
+s = new ShardingTest( "sharding_passthrough" , 2 , 1 , 1 );
s.adminCommand( { enablesharding : "test" } );
db=s.getDB("test");
@@ -63,7 +63,7 @@ files.forEach(
* clean (apitest_dbcollection)
* logout and getnonce
*/
- if (/[\/\\](error3|capped.*|splitvector|apitest_db|cursor6|copydb-auth|profile1|dbhash|median|apitest_dbcollection|evalb|auth1|auth2)\.js$/.test(x.name)) {
+ if (/[\/\\](error3|capped.*|splitvector|apitest_db|cursor6|copydb-auth|profile1|dbhash|median|apitest_dbcollection|evalb|evald|eval_nolock|auth1|auth2|unix_socket\d*)\.js$/.test(x.name)) {
print(" !!!!!!!!!!!!!!! skipping test that has failed under sharding but might not anymore " + x.name)
return;
}
@@ -73,7 +73,7 @@ files.forEach(
return;
}
// These aren't supposed to get run under sharding:
- if (/[\/\\](dbadmin|error1|fsync|fsync2|geo.*|indexh|remove5|update4)\.js$/.test(x.name)) {
+ if (/[\/\\](dbadmin|error1|fsync|fsync2|geo.*|indexh|remove5|update4|notablescan|check_shard_index|mr_replaceIntoDB)\.js$/.test(x.name)) {
print(" >>>>>>>>>>>>>>> skipping test that would fail under sharding " + x.name)
return;
}
diff --git a/jstests/slowNightly/sharding_rs1.js b/jstests/slowNightly/sharding_rs1.js
index b7d90ba..4ad126e 100644
--- a/jstests/slowNightly/sharding_rs1.js
+++ b/jstests/slowNightly/sharding_rs1.js
@@ -43,10 +43,19 @@ function diff(){
assert.lt( 20 , diff() , "big differential here" );
print( diff() )
+{
+ // quick test for SERVER-2686
+ var mydbs = db.getMongo().getDBs().databases;
+ for ( var i=0; i<mydbs.length; i++ ) {
+ assert( mydbs[i].name != "local" , "mongos listDatabases can't return local" );
+ }
+}
+
+
assert.soon( function(){
var d = diff();
return d < 5;
-} , "balance didn't happen" , 1000 * 60 * 3 , 5000 );
+} , "balance didn't happen" , 1000 * 60 * 6 , 5000 );
s.config.settings.update( { _id: "balancer" }, { $set : { stopped: true } } , true );
@@ -56,7 +65,7 @@ for ( i=0; i<s._rs.length; i++ ){
x = r.test.getHashes( "test" );
print( r.url + "\t" + tojson( x ) )
for ( j=0; j<x.slaves.length; j++ )
- assert.eq( x.master.md5 , x.slaves[j].md5 , "hashes same for: " + r.url + " slave: " + j );
+ assert.eq( x.master.md5 , x.slaves[j].md5 , "hashes not same for: " + r.url + " slave: " + j );
}
diff --git a/jstests/slowNightly/sharding_rs2.js b/jstests/slowNightly/sharding_rs2.js
new file mode 100644
index 0000000..cd7cf68
--- /dev/null
+++ b/jstests/slowNightly/sharding_rs2.js
@@ -0,0 +1,163 @@
+// mostly for testing mongos w/replica sets
+
+
+s = new ShardingTest( "rs2" , 2 , 1 , 1 , { rs : true , chunksize : 1 } )
+
+db = s.getDB( "test" )
+t = db.foo
+
+// -------------------------------------------------------------------------------------------
+// ---------- test that config server updates when replica set config changes ----------------
+// -------------------------------------------------------------------------------------------
+
+
+db.foo.save( { _id : 5 ,x : 17 } )
+assert.eq( 1 , db.foo.count() );
+
+s.config.databases.find().forEach( printjson )
+s.config.shards.find().forEach( printjson )
+
+serverName = s.getServerName( "test" )
+
+function countNodes(){
+ var x = s.config.shards.findOne( { _id : serverName } );
+ return x.host.split( "," ).length
+}
+
+assert.eq( 3 , countNodes() , "A1" )
+
+rs = s.getRSEntry( serverName );
+rs.test.add()
+try {
+ rs.test.reInitiate();
+}
+catch ( e ){
+ // this is ok as rs's may close connections on a change of master
+ print( e );
+}
+
+assert.soon(
+ function(){
+ try {
+ printjson( rs.test.getMaster().getDB("admin").runCommand( "isMaster" ) )
+ s.config.shards.find().forEach( printjsononeline );
+ return countNodes() == 4;
+ }
+ catch ( e ){
+ print( e );
+ }
+ } , "waiting for config server to update" , 180 * 1000 , 1000 );
+
+// cleanup after adding node
+for ( i=0; i<5; i++ ){
+ try {
+ db.foo.findOne();
+ }
+ catch ( e ){}
+}
+
+// -------------------------------------------------------------------------------------------
+// ---------- test routing to slaves ----------------
+// -------------------------------------------------------------------------------------------
+
+// --- not sharded ----
+
+m = new Mongo( s.s.name );
+ts = m.getDB( "test" ).foo
+
+before = rs.test.getMaster().adminCommand( "serverStatus" ).opcounters
+
+for ( i=0; i<10; i++ )
+ assert.eq( 17 , ts.findOne().x , "B1" )
+
+m.setSlaveOk()
+for ( i=0; i<10; i++ )
+ assert.eq( 17 , ts.findOne().x , "B2" )
+
+after = rs.test.getMaster().adminCommand( "serverStatus" ).opcounters
+
+printjson( before )
+printjson( after )
+
+assert.eq( before.query + 10 , after.query , "B3" )
+
+// --- add more data ----
+
+db.foo.ensureIndex( { x : 1 } )
+
+for ( i=0; i<100; i++ ){
+ if ( i == 17 ) continue;
+ db.foo.insert( { x : i } )
+}
+db.getLastError( 3 , 10000 );
+
+assert.eq( 100 , ts.count() , "B4" )
+assert.eq( 100 , ts.find().itcount() , "B5" )
+assert.eq( 100 , ts.find().batchSize(5).itcount() , "B6" )
+
+t.find().batchSize(3).next();
+gc(); gc(); gc();
+
+// --- sharded ----
+
+assert.eq( 100 , db.foo.count() , "C1" )
+
+s.adminCommand( { enablesharding : "test" } );
+s.adminCommand( { shardcollection : "test.foo" , key : { x : 1 } } );
+
+assert.eq( 100 , t.count() , "C2" )
+s.adminCommand( { split : "test.foo" , middle : { x : 50 } } )
+
+db.printShardingStatus()
+
+other = s.config.shards.findOne( { _id : { $ne : serverName } } );
+s.adminCommand( { moveChunk : "test.foo" , find : { x : 10 } , to : other._id } )
+assert.eq( 100 , t.count() , "C3" )
+
+assert.eq( 50 , rs.test.getMaster().getDB( "test" ).foo.count() , "C4" )
+
+// by non-shard key
+
+m = new Mongo( s.s.name );
+ts = m.getDB( "test" ).foo
+
+before = rs.test.getMaster().adminCommand( "serverStatus" ).opcounters
+
+for ( i=0; i<10; i++ )
+ assert.eq( 17 , ts.findOne( { _id : 5 } ).x , "D1" )
+
+m.setSlaveOk()
+for ( i=0; i<10; i++ )
+ assert.eq( 17 , ts.findOne( { _id : 5 } ).x , "D2" )
+
+after = rs.test.getMaster().adminCommand( "serverStatus" ).opcounters
+
+assert.eq( before.query + 10 , after.query , "D3" )
+
+// by shard key
+
+m = new Mongo( s.s.name );
+ts = m.getDB( "test" ).foo
+
+before = rs.test.getMaster().adminCommand( "serverStatus" ).opcounters
+
+for ( i=0; i<10; i++ )
+ assert.eq( 57 , ts.findOne( { x : 57 } ).x , "E1" )
+
+m.setSlaveOk()
+for ( i=0; i<10; i++ )
+ assert.eq( 57 , ts.findOne( { x : 57 } ).x , "E2" )
+
+after = rs.test.getMaster().adminCommand( "serverStatus" ).opcounters
+
+assert.eq( before.query + 10 , after.query , "E3" )
+
+assert.eq( 100 , ts.count() , "E4" )
+assert.eq( 100 , ts.find().itcount() , "E5" )
+printjson( ts.find().batchSize(5).explain() )
+assert.eq( 100 , ts.find().batchSize(5).itcount() , "E6" )
+
+printjson( db.adminCommand( "getShardMap" ) );
+
+
+s.stop()
diff --git a/jstests/slowNightly/unix_socket1.js b/jstests/slowNightly/unix_socket1.js
new file mode 100644
index 0000000..e651659
--- /dev/null
+++ b/jstests/slowNightly/unix_socket1.js
@@ -0,0 +1,26 @@
+if ( ! _isWindows() ) {
+ hoststring = db.getMongo().host
+ index = hoststring.lastIndexOf(':')
+ if (index == -1){
+ port = '27017'
+ } else {
+ port = hoststring.substr(index + 1)
+ }
+
+ sock = new Mongo('/tmp/mongodb-' + port + '.sock')
+ sockdb = sock.getDB(db.getName())
+ assert( sockdb.runCommand('ping').ok );
+
+ // test unix socket path
+ var ports = allocatePorts(1);
+ var path = "/data/db/sockpath";
+
+ var conn = new MongodRunner(ports[0], path, null, null, ["--unixSocketPrefix", path]);
+ conn.start();
+
+ var sock2 = new Mongo(path+"/mongodb-"+ports[0]+".sock");
+ sockdb2 = sock2.getDB(db.getName())
+ assert( sockdb2.runCommand('ping').ok );
+} else {
+ print("Not testing unix sockets on Windows");
+}
diff --git a/jstests/slowWeekly/conc_update.js b/jstests/slowWeekly/conc_update.js
index 6094136..d460a0d 100644
--- a/jstests/slowWeekly/conc_update.js
+++ b/jstests/slowWeekly/conc_update.js
@@ -16,36 +16,39 @@ print("making an index (this will take a while)")
db.conc.ensureIndex({x:1})
var c1=db.conc.count({x:{$lt:NRECORDS}})
-// this is just a flag that the child will toggle when it's done.
-db.concflag.update({}, {inprog:true}, true)
updater=startParallelShell("db=db.getSisterDB('concurrency');\
+ db.concflag.insert( {inprog:true} );\
+ sleep(20);\
db.conc.update({}, {$inc:{x: "+NRECORDS+"}}, false, true);\
e=db.getLastError();\
print('update error: '+ e);\
db.concflag.update({},{inprog:false});\
- assert.eq(e, null, \"update failed\");");
+ assert.eq(e, null, 'update failed');");
+
+assert.soon( function(){ var x = db.concflag.findOne(); return x && x.inprog; } ,
+ "wait for fork" , 30000 , 1 );
querycount=0;
decrements=0;
misses=0
-while (1) {
- if (db.concflag.findOne().inprog) {
+
+assert.soon(
+ function(){
c2=db.conc.count({x:{$lt:NRECORDS}})
- e=db.getLastError()
print(c2)
- print(e)
- assert.eq(e, null, "some count() failed")
querycount++;
if (c2<c1)
decrements++;
else
misses++;
- c1 = c2;
- } else
- break;
- sleep(10);
-}
+ c1 = c2;
+ return ! db.concflag.findOne().inprog;
+ } ,
+ "update never finished" , 3600 * 1000 , 10 );
+
print(querycount + " queries, " + decrements + " decrements, " + misses + " misses");
+assert.eq( NRECORDS , db.conc.count() , "AT END 1" )
+
updater() // wait()
diff --git a/jstests/slowWeekly/disk_reuse1.js b/jstests/slowWeekly/disk_reuse1.js
new file mode 100644
index 0000000..4e504c0
--- /dev/null
+++ b/jstests/slowWeekly/disk_reuse1.js
@@ -0,0 +1,41 @@
+
+t = db.disk_reuse1;
+t.drop();
+
+N = 10000;
+
+function k(){
+ return Math.floor( Math.random() * N );
+}
+
+s = "";
+while ( s.length < 1024 )
+ s += "abc";
+
+state = {}
+
+for ( i=0; i<N; i++ )
+ t.insert( { _id : i , s : s } );
+
+orig = t.stats();
+
+t.remove();
+
+for ( i=0; i<N; i++ )
+ t.insert( { _id : i , s : s } );
+
+assert.eq( orig.storageSize , t.stats().storageSize , "A" )
+
+for ( j=0; j<100; j++ ){
+ for ( i=0; i<N; i++ ){
+ var r = Math.random();
+ if ( r > .5 )
+ t.remove( { _id : i } )
+ else
+ t.insert( { _id : i , s : s } )
+ }
+
+ //printjson( t.stats() );
+
+ assert.eq( orig.storageSize , t.stats().storageSize , "B" + j )
+}
diff --git a/jstests/slowWeekly/dur_passthrough.js b/jstests/slowWeekly/dur_passthrough.js
new file mode 100644
index 0000000..1840fb7
--- /dev/null
+++ b/jstests/slowWeekly/dur_passthrough.js
@@ -0,0 +1,44 @@
+//
+// simple runner to run toplevel tests in jstests
+//
+
+//TODO(mathias) add --master or make another test
+//conn = startMongodEmpty("--port", 30200, "--dbpath", "/data/db/dur_passthrough", "--dur", "--smallfiles", "--durOptions", "24");
+conn = startMongodEmpty("--port", 30200, "--dbpath", "/data/db/dur_passthrough", "--dur", "--smallfiles", "--durOptions", "8");
+db = conn.getDB("test");
+
+var files = listFiles("jstests");
+files = files.sort(compareOn('name'));
+
+var runnerStart = new Date()
+
+files.forEach(
+ function (x) {
+
+ if (/[\/\\]_/.test(x.name) ||
+ !/\.js$/.test(x.name) ||
+ /repair/.test(x.name) || // fails on recovery
+ /shellkillop/.test(x.name) || // takes forever and don't test anything new
+ false // placeholder so all real tests end in ||
+ )
+ {
+ print(" >>>>>>>>>>>>>>> skipping " + x.name);
+ return;
+ }
+
+ print();
+ print(" *******************************************");
+ print(" Test : " + x.name + " ...");
+ print(" " + Date.timeFunc(function () { load(x.name); }, 1) + "ms");
+
+ }
+);
+
+stopMongod(30200);
+
+var runnerEnd = new Date()
+
+print( "total runner time: " + ( ( runnerEnd.getTime() - runnerStart.getTime() ) / 1000 ) + "secs" )
+
+//TODO(mathias): test recovery here
+
diff --git a/jstests/slowWeekly/geo_near_random1.js b/jstests/slowWeekly/geo_near_random1.js
new file mode 100644
index 0000000..5ddfd26
--- /dev/null
+++ b/jstests/slowWeekly/geo_near_random1.js
@@ -0,0 +1,13 @@
+// this tests all points using $near
+load("jstests/libs/geo_near_random.js");
+
+var test = new GeoNearRandomTest("weekly.geo_near_random1");
+
+test.insertPts(1000);
+
+test.testPt([0,0]);
+test.testPt(test.mkPt());
+test.testPt(test.mkPt());
+test.testPt(test.mkPt());
+test.testPt(test.mkPt());
+
diff --git a/jstests/slowWeekly/geo_near_random2.js b/jstests/slowWeekly/geo_near_random2.js
new file mode 100644
index 0000000..9e93657
--- /dev/null
+++ b/jstests/slowWeekly/geo_near_random2.js
@@ -0,0 +1,21 @@
+// this tests 1% of all points using $near and $nearSphere
+load("jstests/libs/geo_near_random.js");
+
+var test = new GeoNearRandomTest("weekly.geo_near_random2");
+
+test.insertPts(50000);
+
+opts = {sphere:0, nToTest:test.nPts*0.01};
+test.testPt([0,0], opts);
+test.testPt(test.mkPt(), opts);
+test.testPt(test.mkPt(), opts);
+test.testPt(test.mkPt(), opts);
+test.testPt(test.mkPt(), opts);
+
+opts.sphere = 1
+test.testPt([0,0], opts);
+test.testPt(test.mkPt(0.8), opts);
+test.testPt(test.mkPt(0.8), opts);
+test.testPt(test.mkPt(0.8), opts);
+test.testPt(test.mkPt(0.8), opts);
+
diff --git a/jstests/slowWeekly/indexbg_dur.js b/jstests/slowWeekly/indexbg_dur.js
new file mode 100644
index 0000000..5fbe0e7
--- /dev/null
+++ b/jstests/slowWeekly/indexbg_dur.js
@@ -0,0 +1,67 @@
+/**
+ * Kill mongod during a background index build and ensure that the bad index
+ * can be dropped on restart.
+ */
+
+function countFields( x ) {
+ var count = 0;
+ for( var i in x ) {
+ ++count;
+ }
+ return count;
+}
+
+size = 100000;
+while( 1 ) {
+ print( "size: " + size );
+
+ var testname = "index_build";
+ var path = "/data/db/" + testname+"_dur";
+ conn = startMongodEmpty("--port", 30001, "--dbpath", path, "--dur", "--smallfiles", "--durOptions", 8);
+ t = conn.getDB( testname ).getCollection( testname );
+
+ for( var i = 0; i < size; ++i ) {
+ t.save( {i:i} );
+ }
+ t.getDB().getLastError();
+ x = startMongoProgramNoConnect( "mongo", "--eval", "db.getSisterDB( '" + testname + "' )." + testname + ".ensureIndex( {i:1}, {background:true} );", conn.host );
+ sleep( 1000 );
+ stopMongod( 30001, /* signal */ 9 );
+ waitProgram( x );
+
+ conn = startMongodNoReset("--port", 30001, "--dbpath", path, "--dur", "--smallfiles", "--durOptions", 8);
+ t = conn.getDB( testname ).getCollection( testname );
+
+ var statsSize = countFields( t.stats().indexSizes );
+ var nsSize = conn.getDB( testname ).system.indexes.count( {ns:testname+'.'+testname} );
+
+ // If index build completed before the kill, try again with more data.
+ if ( !( statsSize == 1 && nsSize == 2 ) ) {
+ print( "statsSize: " + statsSize + ", nsSize: " + nsSize + ", retrying with more data" );
+ stopMongod( 30001 );
+ size *= 2;
+ continue;
+ }
+
+ assert.eq( "index not found", t.dropIndex( "i_1" ).errmsg );
+
+ var statsSize = countFields( t.stats().indexSizes );
+ var nsSize = conn.getDB( testname ).system.indexes.count( {ns:testname+'.'+testname} );
+
+ assert.eq( statsSize, nsSize );
+ assert( t.validate().valid );
+ // TODO check that index namespace is cleaned up as well once that is implemented
+
+ t.ensureIndex( {i:1} );
+ var statsSize = countFields( t.stats().indexSizes );
+ var nsSize = conn.getDB( testname ).system.indexes.count( {ns:testname+'.'+testname} );
+
+ assert.eq( 2, statsSize );
+ assert.eq( 2, nsSize );
+
+ exp = t.find( {i:20} ).explain();
+ assert.eq( 1, exp.n );
+ assert.eq( 'BtreeCursor i_1', exp.cursor );
+
+ break;
+}
diff --git a/jstests/slowWeekly/query_yield1.js b/jstests/slowWeekly/query_yield1.js
index e996b53..1a95b87 100644
--- a/jstests/slowWeekly/query_yield1.js
+++ b/jstests/slowWeekly/query_yield1.js
@@ -2,10 +2,10 @@
t = db.query_yield1;
t.drop()
-N = 10000;
+N = 20000;
i = 0;
-q = function(){ var x=this.n; for ( var i=0; i<500; i++ ){ x = x * 2; } return false; }
+q = function(){ var x=this.n; for ( var i=0; i<250; i++ ){ x = x * 2; } return false; }
while ( true ){
function fill(){
@@ -59,7 +59,7 @@ while ( ( (new Date()).getTime() - start ) < ( time * 2 ) ){
assert.eq( 1 , x.inprog.length , "nothing in prog" );
}
- assert.gt( 50 , me );
+ assert.gt( 200 , me , "took too long for me to run" );
if ( x.inprog.length == 0 )
break;
diff --git a/jstests/slowWeekly/query_yield2.js b/jstests/slowWeekly/query_yield2.js
index e13fabe..dd7e5d9 100644
--- a/jstests/slowWeekly/query_yield2.js
+++ b/jstests/slowWeekly/query_yield2.js
@@ -2,10 +2,10 @@
t = db.query_yield2;
t.drop()
-N = 100;
+N = 200;
i = 0;
-q = function(){ var x=this.n; for ( var i=0; i<50000; i++ ){ x = x * 2; } return false; }
+q = function(){ var x=this.n; for ( var i=0; i<25000; i++ ){ x = x * 2; } return false; }
while ( true ){
function fill(){
@@ -59,7 +59,7 @@ while ( ( (new Date()).getTime() - start ) < ( time * 2 ) ){
assert.eq( 1 , x.inprog.length , "nothing in prog" );
}
- assert.gt( 75 , me );
+ assert.gt( 100 , me );
if ( x.inprog.length == 0 )
break;
diff --git a/jstests/slowWeekly/update_yield1.js b/jstests/slowWeekly/update_yield1.js
index 2e63690..7e95855 100644
--- a/jstests/slowWeekly/update_yield1.js
+++ b/jstests/slowWeekly/update_yield1.js
@@ -27,7 +27,7 @@ while ( true ){
timeUpdate();
time = timeUpdate();
print( N + "\t" + time );
- if ( time > 2000 )
+ if ( time > 8000 )
break;
N *= 2;
@@ -47,13 +47,14 @@ num = 0;
start = new Date();
while ( ( (new Date()).getTime() - start ) < ( time * 2 ) ){
var me = Date.timeFunc( function(){ t.findOne(); } );
+ if (me > 50) print("time: " + me);
if ( num++ == 0 ){
var x = db.currentOp()
assert.eq( 1 , x.inprog.length , "nothing in prog" );
}
- assert.gt( 50 , me );
+ assert.gt( 2000 , me );
}
join();
@@ -65,14 +66,16 @@ assert.eq( 0 , x.inprog.length , "weird 2" );
join = startParallelShell( "db.update_yield1.update( { $atomic : true } , { $inc : { n : 1 } } , false , true ); db.getLastError()" );
-assert.soon(
- function(){
- return db.currentOp().inprog.length > 0;
- } , "never doing update 2"
-);
+sleep(1000); // wait for shell startup ops to finish
+
+var x = db.currentOp();
+printjson(x);
+assert.eq(1, x.inprog.length, "never doing update 2");
+assert.eq("update", x.inprog[0].op);
+
+t.findOne(); // should wait for update to finish
-t.findOne();
var x = db.currentOp()
-assert.eq( 0 , x.inprog.length , "should have been atomic" );
+assert.eq( [] , x.inprog , "should have been atomic" );
join();
diff --git a/jstests/sort2.js b/jstests/sort2.js
index facd64c..1e21414 100644
--- a/jstests/sort2.js
+++ b/jstests/sort2.js
@@ -1,6 +1,6 @@
// test sorting, mainly a test ver simple with no index
-t = db.sorrrt2;
+t = db.sort2;
t.drop();
t.save({x:1, y:{a:5,b:4}});
diff --git a/jstests/splitvector.js b/jstests/splitvector.js
index 8d86319..da93486 100644
--- a/jstests/splitvector.js
+++ b/jstests/splitvector.js
@@ -11,7 +11,7 @@
// e.g. 20000
// @param maxChunkSize is in MBs.
//
-assertChunkSizes = function ( splitVec , numDocs , maxChunkSize ){
+assertChunkSizes = function ( splitVec , numDocs , maxChunkSize , msg ){
splitVec = [{ x: -1 }].concat( splitVec );
splitVec.push( { x: numDocs+1 } );
for ( i=0; i<splitVec.length-1; i++) {
@@ -22,9 +22,9 @@ assertChunkSizes = function ( splitVec , numDocs , maxChunkSize ){
// It is okay for the last chunk to be smaller. A collection's size does not
// need to be exactly a multiple of maxChunkSize.
if ( i < splitVec.length - 2 )
- assert.close( maxChunkSize , size , "A"+i , -3 );
+ assert.close( maxChunkSize , size , "A"+i , -3 );
else
- assert.gt( maxChunkSize, size, "A"+i );
+ assert.gt( maxChunkSize , size , "A"+i , msg + "b" );
}
}
@@ -37,27 +37,27 @@ f = db.jstests_splitvector;
f.drop();
// -------------------------
-// Case: missing paramters
+// Case 1: missing parameters
-assert.eq( false, db.runCommand( { splitVector: "test.jstests_splitvector" } ).ok );
-assert.eq( false, db.runCommand( { splitVector: "test.jstests_splitvector" , maxChunkSize: 1} ).ok );
+assert.eq( false, db.runCommand( { splitVector: "test.jstests_splitvector" } ).ok , "1a" );
+assert.eq( false, db.runCommand( { splitVector: "test.jstests_splitvector" , maxChunkSize: 1} ).ok , "1b" );
// -------------------------
-// Case: missing index
+// Case 2: missing index
-assert.eq( false, db.runCommand( { splitVector: "test.jstests_splitvector" , keyPattern: {x:1} , maxChunkSize: 1 } ).ok );
+assert.eq( false, db.runCommand( { splitVector: "test.jstests_splitvector" , keyPattern: {x:1} , maxChunkSize: 1 } ).ok , "2");
// -------------------------
-// Case: empty collection
+// Case 3: empty collection
f.ensureIndex( { x: 1} );
-assert.eq( [], db.runCommand( { splitVector: "test.jstests_splitvector" , keyPattern: {x:1} , maxChunkSize: 1 } ).splitKeys );
+assert.eq( [], db.runCommand( { splitVector: "test.jstests_splitvector" , keyPattern: {x:1} , maxChunkSize: 1 } ).splitKeys , "3");
// -------------------------
-// Case: uniform collection
+// Case 4: uniform collection
f.drop();
f.ensureIndex( { x: 1 } );
@@ -67,15 +67,129 @@ filler = "";
while( filler.length < 500 ) filler += "a";
f.save( { x: 0, y: filler } );
docSize = db.runCommand( { datasize: "test.jstests_splitvector" } ).size;
-assert.gt( docSize, 500 );
+assert.gt( docSize, 500 , "4a" );
// Fill collection and get split vector for 1MB maxChunkSize
numDocs = 20000;
for( i=1; i<numDocs; i++ ){
f.save( { x: i, y: filler } );
}
+db.getLastError();
res = db.runCommand( { splitVector: "test.jstests_splitvector" , keyPattern: {x:1} , maxChunkSize: 1 } );
-assert.eq( true , res.ok );
-assert.close( numDocs*docSize / (1<<20) , res.splitKeys.length , "num split keys" , -1 );
-assertChunkSizes( res.splitKeys , numDocs, (1<<20) * 0.9 ); // splitVector cuts at 90% of maxChunkSize
+// splitVector aims at getting half-full chunks after split
+factor = 0.5;
+
+assert.eq( true , res.ok , "4b" );
+assert.close( numDocs*docSize / ((1<<20) * factor), res.splitKeys.length , "num split keys" , -1 );
+assertChunkSizes( res.splitKeys , numDocs, (1<<20) * factor , "4d" );
+
+
+// -------------------------
+// Case 5: limit number of split points
+
+f.drop();
+f.ensureIndex( { x: 1 } );
+
+// Fill collection and get split vector for 1MB maxChunkSize
+numDocs = 10000;
+for( i=1; i<numDocs; i++ ){
+ f.save( { x: i, y: filler } );
+}
+db.getLastError();
+res = db.runCommand( { splitVector: "test.jstests_splitvector" , keyPattern: {x:1} , maxChunkSize: 1 , maxSplitPoints: 1} );
+
+assert.eq( true , res.ok , "5a" );
+assert.eq( 1 , res.splitKeys.length , "5b" );
+
+
+// -------------------------
+// Case 6: limit number of objects in a chunk
+
+f.drop();
+f.ensureIndex( { x: 1 } );
+
+// Fill collection and get split vector for 1MB maxChunkSize
+numDocs = 10000;
+for( i=1; i<numDocs; i++ ){
+ f.save( { x: i, y: filler } );
+}
+db.getLastError();
+res = db.runCommand( { splitVector: "test.jstests_splitvector" , keyPattern: {x:1} , maxChunkSize: 1 , maxChunkObjects: 500} );
+
+assert.eq( true , res.ok , "6a" );
+assert.eq( 19 , res.splitKeys.length , "6b" );
+
+
+// -------------------------
+// Case 7: enough occurrences of min key documents to pass the chunk limit
+// [1111111111111111,2,3)
+
+f.drop();
+f.ensureIndex( { x: 1 } );
+
+// Fill collection and get split vector for 1MB maxChunkSize
+numDocs = 2100;
+for( i=1; i<numDocs; i++ ){
+ f.save( { x: 1, y: filler } );
+}
+
+for( i=1; i<10; i++ ){
+ f.save( { x: 2, y: filler } );
+}
+db.getLastError();
+res = db.runCommand( { splitVector: "test.jstests_splitvector" , keyPattern: {x:1} , maxChunkSize: 1 } );
+
+assert.eq( true , res.ok , "7a" );
+assert.eq( 2 , res.splitKeys[0].x, "7b");
+
+
+// -------------------------
+// Case 8: few occurrences of min key, and enough of some other that we cannot split it
+// [1, 22222222222222, 3)
+
+f.drop();
+f.ensureIndex( { x: 1 } );
+
+for( i=1; i<10; i++ ){
+ f.save( { x: 1, y: filler } );
+}
+
+numDocs = 2100;
+for( i=1; i<numDocs; i++ ){
+ f.save( { x: 2, y: filler } );
+}
+
+for( i=1; i<10; i++ ){
+ f.save( { x: 3, y: filler } );
+}
+
+db.getLastError();
+res = db.runCommand( { splitVector: "test.jstests_splitvector" , keyPattern: {x:1} , maxChunkSize: 1 } );
+
+assert.eq( true , res.ok , "8a" );
+assert.eq( 2 , res.splitKeys.length , "8b" );
+assert.eq( 2 , res.splitKeys[0].x , "8c" );
+assert.eq( 3 , res.splitKeys[1].x , "8d" );
+
+
+// -------------------------
+// Case 9: splitVector "force" mode, where we split (possible small) chunks in the middle
+//
+
+f.drop();
+f.ensureIndex( { x: 1 } );
+
+f.save( { x: 1 } );
+f.save( { x: 2 } );
+f.save( { x: 3 } );
+db.getLastError();
+
+res = db.runCommand( { splitVector: "test.jstests_splitvector" , keyPattern: {x:1} , force : true } );
+
+assert.eq( true , res.ok , "9a" );
+assert.eq( 1 , res.splitKeys.length , "9b" );
+assert.eq( 2 , res.splitKeys[0].x , "9c" );
+
+
+print("PASSED");
diff --git a/jstests/tempCleanup.js b/jstests/temp_cleanup.js
index 0a8a909..e827083 100644
--- a/jstests/tempCleanup.js
+++ b/jstests/temp_cleanup.js
@@ -6,7 +6,7 @@ t.drop()
t.insert( { x : 1 } )
-res = t.mapReduce( function(){ emit(1,1); } , function(){ return 1; } );
+res = t.mapReduce( function(){ emit(1,1); } , function(){ return 1; } , "xyz" );
printjson( res );
assert.eq( 1 , t.count() , "A1" )
diff --git a/jstests/tool/dumprestore2.js b/jstests/tool/dumprestore2.js
index 86e65ae..31822e5 100644
--- a/jstests/tool/dumprestore2.js
+++ b/jstests/tool/dumprestore2.js
@@ -8,6 +8,9 @@ c.save( { a : 22 } );
assert.eq( 1 , c.count() , "setup2" );
t.stop();
+// SERVER-2501 on Windows the mongod may still be running at this point, so we wait for it to stop.
+sleep( 5000 );
+
t.runTool( "dump" , "--dbpath" , t.dbpath , "--out" , t.ext );
resetDbpath( t.dbpath );
diff --git a/jstests/tool/dumprestore3.js b/jstests/tool/dumprestore3.js
new file mode 100644
index 0000000..32e5f35
--- /dev/null
+++ b/jstests/tool/dumprestore3.js
@@ -0,0 +1,60 @@
+// dumprestore3.js
+
+var name = "dumprestore3";
+
+function step(msg) {
+ msg = msg || "";
+ this.x = (this.x || 0) + 1;
+ print('\n' + name + ".js step " + this.x + ' ' + msg);
+}
+
+step();
+
+var replTest = new ReplSetTest( {name: name, nodes: 2} );
+var nodes = replTest.startSet();
+replTest.initiate();
+var master = replTest.getMaster();
+
+{
+ step("populate master");
+ var foo = master.getDB("foo");
+ for (i = 0; i < 20; i++) {
+ foo.bar.insert({ x: i, y: "abc" });
+ }
+}
+
+{
+ step("wait for slaves");
+ replTest.awaitReplication();
+}
+
+{
+ step("dump & restore a db into a slave");
+ var port = 30020;
+ var conn = startMongodTest(port, name + "-other");
+ var c = conn.getDB("foo").bar;
+ c.save({ a: 22 });
+ assert.eq(1, c.count(), "setup2");
+}
+
+step("try mongorestore to slave");
+
+var data = "/data/db/dumprestore3-other1/";
+resetDbpath(data);
+runMongoProgram( "mongodump", "--host", "127.0.0.1:"+port, "--out", data );
+
+var x = runMongoProgram( "mongorestore", "--host", "127.0.0.1:"+replTest.ports[1], "--dir", data );
+assert.eq(x, _isWindows() ? -1 : 255, "mongorestore should exit w/ -1 on slave");
+
+step("try mongoimport to slave");
+
+dataFile = "/data/db/dumprestore3-other2.json";
+runMongoProgram( "mongoexport", "--host", "127.0.0.1:"+port, "--out", dataFile, "--db", "foo", "--collection", "bar" );
+
+x = runMongoProgram( "mongoimport", "--host", "127.0.0.1:"+replTest.ports[1], "--file", dataFile );
+assert.eq(x, _isWindows() ? -1 : 255, "mongoreimport should exit w/ -1 on slave"); // windows return is signed
+
+step("stopSet");
+replTest.stopSet();
+
+step("SUCCESS");
diff --git a/jstests/tool/dumprestore4.js b/jstests/tool/dumprestore4.js
new file mode 100644
index 0000000..568e196
--- /dev/null
+++ b/jstests/tool/dumprestore4.js
@@ -0,0 +1,42 @@
+// dumprestore4.js -- see SERVER-2186
+
+// The point of this test is to ensure that mongorestore successfully
+// constructs indexes when the database being restored into has a
+// different name than the database dumped from. There are 2
+// issues here: (1) if you dumped from database "A" and restore into
+// database "B", B should have exactly the right indexes; (2) if for
+// some reason you have another database called "A" at the time of the
+// restore, mongorestore shouldn't touch it.
+
+t = new ToolTest( "dumprestore4" );
+
+c = t.startDB( "dumprestore4" );
+
+db=t.db
+
+dbname = db.getName();
+dbname2 = "NOT_"+dbname;
+
+db2=db.getSisterDB( dbname2 );
+
+db.dropDatabase(); // make sure it's empty
+db2.dropDatabase(); // make sure everybody's empty
+
+assert.eq( 0 , db.system.indexes.count() , "setup1" );
+c.ensureIndex({ x : 1} );
+assert.eq( 2 , db.system.indexes.count() , "setup2" ); // _id and x_1
+
+assert.eq( 0, t.runTool( "dump" , "-d" , dbname, "--out", t.ext ), "dump")
+
+// to ensure issue (2), we have to clear out the first db.
+// By inspection, db.dropIndexes() doesn't get rid of the _id index on c,
+// so we have to drop the collection.
+c.drop();
+assert.eq( 0, t.runTool( "restore" , "--dir" , t.ext + "/" + dbname, "-d", dbname2 ), "restore" );
+
+// issue (1)
+assert.eq( 2 , db2.system.indexes.count() , "after restore 1" );
+// issue (2)
+assert.eq( 0 , db.system.indexes.count() , "after restore 2" );
+
+t.stop();
diff --git a/jstests/tool/tool1.js b/jstests/tool/tool1.js
index 91fce80..ebe8293 100644
--- a/jstests/tool/tool1.js
+++ b/jstests/tool/tool1.js
@@ -18,7 +18,7 @@ function fileSize(){
port = allocatePorts( 1 )[ 0 ];
resetDbpath( externalPath );
-m = startMongod( "--port", port, "--dbpath", dbPath, "--nohttpinterface", "--bind_ip", "127.0.0.1" );
+m = startMongod( "--port", port, "--dbpath", dbPath, "--nohttpinterface", "--noprealloc" , "--bind_ip", "127.0.0.1" );
c = m.getDB( baseName ).getCollection( baseName );
c.save( { a: 1 } );
assert( c.findOne() );
diff --git a/jstests/ts1.js b/jstests/ts1.js
new file mode 100644
index 0000000..062519c
--- /dev/null
+++ b/jstests/ts1.js
@@ -0,0 +1,38 @@
+t = db.ts1
+t.drop()
+
+N = 20
+
+for ( i=0; i<N; i++ ){
+ t.insert( { _id : i , x : new Timestamp() } )
+ sleep( 100 )
+}
+
+function get(i){
+ return t.findOne( { _id : i } ).x;
+}
+
+function cmp( a , b ){
+ if ( a.t < b.t )
+ return -1;
+ if ( a.t > b.t )
+ return 1;
+
+ return a.i - b.i;
+}
+
+for ( i=0; i<N-1; i++ ){
+ a = get(i);
+ b = get(i+1);
+ //print( tojson(a) + "\t" + tojson(b) + "\t" + cmp(a,b) );
+ assert.gt( 0 , cmp( a , b ) , "cmp " + i )
+}
+
+assert.eq( N , t.find( { x : { $type : 17 } } ).itcount() , "B1" )
+assert.eq( 0 , t.find( { x : { $type : 3 } } ).itcount() , "B2" )
+
+t.insert( { _id : 100 , x : new Timestamp( 123456 , 50 ) } )
+x = t.findOne( { _id : 100 } ).x
+assert.eq( 123000 , x.t , "C1" )
+assert.eq( 50 , x.i , "C2" )
+
diff --git a/jstests/update_addToSet3.js b/jstests/update_addToSet3.js
new file mode 100644
index 0000000..e9da58e
--- /dev/null
+++ b/jstests/update_addToSet3.js
@@ -0,0 +1,18 @@
+
+t = db.update_addToSet3
+t.drop()
+
+t.insert( { _id : 1 } )
+
+t.update( { _id : 1 } , { $addToSet : { a : { $each : [ 6 , 5 , 4 ] } } } )
+assert.eq( t.findOne() , { _id : 1 , a : [ 6 , 5 , 4 ] } , "A1" )
+
+t.update( { _id : 1 } , { $addToSet : { a : { $each : [ 3 , 2 , 1 ] } } } )
+assert.eq( t.findOne() , { _id : 1 , a : [ 6 , 5 , 4 , 3 , 2 , 1 ] } , "A2" )
+
+t.update( { _id : 1 } , { $addToSet : { a : { $each : [ 4 , 7 , 9 , 2 ] } } } )
+assert.eq( t.findOne() , { _id : 1 , a : [ 6 , 5 , 4 , 3 , 2 , 1 , 7 , 9 ] } , "A3" )
+
+t.update( { _id : 1 } , { $addToSet : { a : { $each : [ 12 , 13 , 12 ] } } } )
+assert.eq( t.findOne() , { _id : 1 , a : [ 6 , 5 , 4 , 3 , 2 , 1 , 7 , 9 , 12 , 13 ] } , "A4" )
+
diff --git a/jstests/update_arraymatch6.js b/jstests/update_arraymatch6.js
new file mode 100644
index 0000000..8892e6f
--- /dev/null
+++ b/jstests/update_arraymatch6.js
@@ -0,0 +1,14 @@
+t = db.jstests_update_arraymatch6;
+t.drop();
+
+function doTest() {
+ t.save( {a: [{id: 1, x: [5,6,7]}, {id: 2, x: [8,9,10]}]} );
+ t.update({'a.id': 1}, {$set: {'a.$.x': [1,1,1]}});
+ assert.automsg( "!db.getLastError()" );
+ assert.eq.automsg( "1", "t.findOne().a[ 0 ].x[ 0 ]" );
+}
+
+doTest();
+t.drop();
+t.ensureIndex( { 'a.id':1 } );
+doTest(); \ No newline at end of file
diff --git a/jstests/update_multi6.js b/jstests/update_multi6.js
new file mode 100644
index 0000000..3799a27
--- /dev/null
+++ b/jstests/update_multi6.js
@@ -0,0 +1,10 @@
+
+t = db.update_multi6
+t.drop();
+
+t.update( { _id : 1 } , { _id : 1 , x : 1 , y : 2 } , true , false );
+assert( t.findOne( { _id : 1 } ) , "A" )
+
+t.update( { _id : 2 } , { _id : 2 , x : 1 , y : 2 } , true , true );
+assert( db.getLastError() , "B: " + db.getLastErrorCmd() );
+