summaryrefslogtreecommitdiff
path: root/jstests
diff options
context:
space:
mode:
authorAntonin Kral <a.kral@bobek.cz>2011-09-14 17:08:06 +0200
committerAntonin Kral <a.kral@bobek.cz>2011-09-14 17:08:06 +0200
commit5d342a758c6095b4d30aba0750b54f13b8916f51 (patch)
tree762e9aa84781f5e3b96db2c02d356c29cf0217c0 /jstests
parentcbe2d992e9cd1ea66af9fa91df006106775d3073 (diff)
downloadmongodb-5d342a758c6095b4d30aba0750b54f13b8916f51.tar.gz
Imported Upstream version 2.0.0
Diffstat (limited to 'jstests')
-rw-r--r--jstests/ageoutjournalfiles.js16
-rw-r--r--jstests/and.js86
-rw-r--r--jstests/and2.js27
-rw-r--r--jstests/and3.js66
-rw-r--r--jstests/andor.js105
-rw-r--r--jstests/apitest_dbcollection.js2
-rw-r--r--jstests/array_match2.js25
-rw-r--r--jstests/array_match3.js13
-rw-r--r--jstests/arrayfind2.js3
-rw-r--r--jstests/arrayfind4.js22
-rw-r--r--jstests/arrayfind5.js23
-rw-r--r--jstests/auth/auth1.js5
-rw-r--r--jstests/auth/auth2.js23
-rw-r--r--jstests/auth/rename.js40
-rw-r--r--jstests/auth1.js17
-rw-r--r--jstests/auth2.js6
-rw-r--r--jstests/bench_test1.js16
-rw-r--r--jstests/bench_test2.js41
-rw-r--r--jstests/big_object1.js2
-rw-r--r--jstests/binData.js14
-rw-r--r--jstests/capped.js8
-rw-r--r--jstests/capped2.js10
-rw-r--r--jstests/capped5.js7
-rw-r--r--jstests/capped6.js2
-rw-r--r--jstests/capped8.js40
-rw-r--r--jstests/capped9.js28
-rw-r--r--jstests/cappeda.js33
-rw-r--r--jstests/compact.js37
-rwxr-xr-xjstests/compact_speed_test.js61
-rw-r--r--jstests/date1.js5
-rw-r--r--jstests/date2.js13
-rw-r--r--jstests/date3.js31
-rw-r--r--jstests/dbcase.js16
-rw-r--r--jstests/dbcase2.js9
-rw-r--r--jstests/dbhash.js10
-rw-r--r--jstests/delx.js1
-rw-r--r--jstests/disk/directoryperdb.js2
-rw-r--r--jstests/disk/diskfull.js8
-rw-r--r--jstests/disk/newcollection.js20
-rw-r--r--jstests/disk/norepeat.js2
-rw-r--r--jstests/disk/quota.js47
-rw-r--r--jstests/disk/quota2.js38
-rw-r--r--jstests/disk/repair3.js2
-rw-r--r--jstests/disk/repair5.js43
-rw-r--r--jstests/distinct1.js1
-rw-r--r--jstests/distinct_index1.js10
-rw-r--r--jstests/drop2.js2
-rw-r--r--jstests/drop3.js29
-rw-r--r--jstests/dropdb.js17
-rw-r--r--jstests/dropdb_race.js44
-rw-r--r--jstests/dur/closeall.js76
-rw-r--r--jstests/dur/data/empty.bson0
-rw-r--r--jstests/dur/diskfull.js51
-rw-r--r--jstests/dur/dropdb.js21
-rwxr-xr-xjstests/dur/dur1.js25
-rwxr-xr-xjstests/dur/dur1_tool.js152
-rw-r--r--jstests/dur/indexbg.js7
-rw-r--r--jstests/dur/indexbg2.js19
-rwxr-xr-xjstests/dur/manyRestart.js4
-rw-r--r--jstests/eval_nolock.js2
-rw-r--r--jstests/evalb.js2
-rw-r--r--jstests/evalc.js11
-rw-r--r--jstests/evald.js10
-rw-r--r--jstests/exists3.js21
-rw-r--r--jstests/exists4.js20
-rw-r--r--jstests/exists5.js33
-rw-r--r--jstests/exists6.js63
-rw-r--r--jstests/exists7.js21
-rw-r--r--jstests/exists8.js76
-rw-r--r--jstests/exists9.js41
-rw-r--r--jstests/find8.js27
-rw-r--r--jstests/find_and_modify2.js6
-rw-r--r--jstests/fsync.js17
-rw-r--r--jstests/geo10.js21
-rw-r--r--jstests/geo4.js2
-rw-r--r--jstests/geo_array0.js25
-rw-r--r--jstests/geo_array1.js30
-rw-r--r--jstests/geo_array2.js163
-rw-r--r--jstests/geo_borders.js263
-rw-r--r--jstests/geo_center_sphere2.js158
-rw-r--r--jstests/geo_distinct.js16
-rw-r--r--jstests/geo_fiddly_box.js44
-rw-r--r--jstests/geo_fiddly_box2.js32
-rw-r--r--jstests/geo_group.js35
-rw-r--r--jstests/geo_mapreduce.js56
-rw-r--r--jstests/geo_mapreduce2.js36
-rw-r--r--jstests/geo_multinest0.js63
-rw-r--r--jstests/geo_multinest1.js37
-rw-r--r--jstests/geo_oob_sphere.js42
-rw-r--r--jstests/geo_poly_edge.js22
-rw-r--r--jstests/geo_poly_line.js17
-rw-r--r--jstests/geo_polygon1.js74
-rw-r--r--jstests/geo_polygon2.js266
-rw-r--r--jstests/geo_polygon3.js54
-rw-r--r--jstests/geo_regex0.js18
-rw-r--r--jstests/geo_small_large.js151
-rw-r--r--jstests/geo_uniqueDocs.js38
-rw-r--r--jstests/getlog1.js24
-rw-r--r--jstests/group7.js43
-rw-r--r--jstests/hint1.js12
-rw-r--r--jstests/idhack.js23
-rw-r--r--jstests/in8.js23
-rw-r--r--jstests/in9.js35
-rw-r--r--jstests/ina.js15
-rw-r--r--jstests/index11.js30
-rw-r--r--jstests/index9.js8
-rw-r--r--jstests/index_big1.js39
-rwxr-xr-xjstests/index_bigkeys.js78
-rw-r--r--jstests/index_check5.js2
-rw-r--r--jstests/index_check8.js12
-rw-r--r--jstests/index_fornew.js13
-rw-r--r--jstests/index_maxkey.js27
-rwxr-xr-xjstests/indexbindata.js0
-rw-r--r--jstests/indexk.js58
-rw-r--r--jstests/indexl.js27
-rw-r--r--jstests/indexm.js38
-rw-r--r--jstests/indexn.js41
-rw-r--r--jstests/indexo.js32
-rw-r--r--jstests/indexp.js58
-rw-r--r--jstests/indexq.js14
-rw-r--r--jstests/indexr.js47
-rw-r--r--jstests/indexs.js21
-rw-r--r--jstests/indext.js21
-rw-r--r--jstests/indexu.js137
-rw-r--r--jstests/indexv.js18
-rw-r--r--jstests/indexw.js14
-rw-r--r--jstests/insert1.js3
-rw-r--r--jstests/libs/geo_near_random.js37
-rw-r--r--jstests/libs/key1 (renamed from jstests/replsets/key1)0
-rw-r--r--jstests/libs/key2 (renamed from jstests/replsets/key2)0
-rw-r--r--jstests/libs/testconfig4
-rw-r--r--jstests/mr_errorhandling.js2
-rw-r--r--jstests/mr_merge2.js37
-rw-r--r--jstests/numberint.js92
-rw-r--r--jstests/numberlong2.js32
-rw-r--r--jstests/numberlong3.js25
-rw-r--r--jstests/or1.js2
-rw-r--r--jstests/or2.js3
-rw-r--r--jstests/or3.js4
-rw-r--r--jstests/or4.js2
-rw-r--r--jstests/ord.js1
-rw-r--r--jstests/org.js19
-rw-r--r--jstests/orh.js17
-rw-r--r--jstests/ori.js48
-rw-r--r--jstests/orj.js121
-rw-r--r--jstests/ork.js11
-rw-r--r--jstests/orl.js13
-rw-r--r--jstests/orm.js29
-rw-r--r--jstests/orn.js22
-rw-r--r--jstests/profile1.js144
-rw-r--r--jstests/profile2.js19
-rw-r--r--jstests/profile3.js26
-rw-r--r--jstests/push.js36
-rw-r--r--jstests/query1.js3
-rw-r--r--jstests/regex2.js8
-rw-r--r--jstests/regex6.js11
-rw-r--r--jstests/regexa.js19
-rw-r--r--jstests/remove10.js28
-rw-r--r--jstests/remove2.js5
-rw-r--r--jstests/remove9.js16
-rw-r--r--jstests/rename.js19
-rw-r--r--jstests/repl/basic1.js19
-rw-r--r--jstests/repl/dbcase.js95
-rw-r--r--jstests/repl/drop_dups.js68
-rw-r--r--jstests/repl/mastermaster1.js23
-rw-r--r--jstests/repl/mod_move.js69
-rw-r--r--jstests/repl/pair1.js100
-rw-r--r--jstests/repl/pair2.js71
-rw-r--r--jstests/repl/pair3.js245
-rw-r--r--jstests/repl/pair4.js160
-rw-r--r--jstests/repl/pair5.js95
-rw-r--r--jstests/repl/pair6.js115
-rw-r--r--jstests/repl/pair7.js85
-rw-r--r--jstests/repl/repl2.js29
-rw-r--r--jstests/repl/repl3.js58
-rw-r--r--jstests/repl/replacePeer1.js82
-rw-r--r--jstests/repl/replacePeer2.js86
-rw-r--r--jstests/repl/snapshot2.js72
-rw-r--r--jstests/repl/snapshot3.js53
-rw-r--r--jstests/replsets/auth1.js35
-rw-r--r--jstests/replsets/cloneDb.js18
-rw-r--r--jstests/replsets/config1.js21
-rwxr-xr-xjstests/replsets/downstream.js36
-rw-r--r--jstests/replsets/fastsync.js151
-rw-r--r--jstests/replsets/initial_sync1.js5
-rw-r--r--jstests/replsets/initial_sync3.js37
-rw-r--r--jstests/replsets/maintenance.js32
-rw-r--r--jstests/replsets/majority.js60
-rw-r--r--jstests/replsets/randomcommands1.js29
-rw-r--r--jstests/replsets/reconfig.js69
-rw-r--r--jstests/replsets/remove1.js130
-rw-r--r--jstests/replsets/replset1.js22
-rw-r--r--jstests/replsets/replset3.js2
-rw-r--r--jstests/replsets/replset5.js88
-rw-r--r--jstests/replsets/replsetadd.js34
-rw-r--r--jstests/replsets/replsetarb1.js33
-rw-r--r--jstests/replsets/replsetarb2.js13
-rw-r--r--jstests/replsets/replsetarb3.js144
-rw-r--r--jstests/replsets/replsetfreeze.js4
-rw-r--r--jstests/replsets/replsetrestart1.js14
-rw-r--r--jstests/replsets/replsetrestart2.js8
-rw-r--r--jstests/replsets/rollback2.js19
-rw-r--r--jstests/replsets/rollback4.js117
-rw-r--r--jstests/replsets/rslib.js44
-rw-r--r--jstests/replsets/slavedelay1.js104
-rw-r--r--jstests/replsets/stale_clustered.js101
-rw-r--r--jstests/replsets/stepdown.js142
-rwxr-xr-xjstests/replsets/stepdown2.js139
-rw-r--r--jstests/replsets/sync1.js396
-rw-r--r--jstests/replsets/sync2.js48
-rw-r--r--jstests/replsets/tags.js154
-rw-r--r--jstests/replsets/tags2.js44
-rw-r--r--jstests/replsets/toostale.js34
-rw-r--r--jstests/replsets/twosets.js35
-rw-r--r--jstests/set7.js16
-rw-r--r--jstests/sharding/addshard1.js2
-rw-r--r--jstests/sharding/addshard4.js26
-rw-r--r--jstests/sharding/array_shard_key.js127
-rw-r--r--jstests/sharding/auth.js177
-rw-r--r--jstests/sharding/count_slaveok.js69
-rw-r--r--jstests/sharding/drop_sharded_db.js62
-rw-r--r--jstests/sharding/features2.js11
-rw-r--r--jstests/sharding/features3.js61
-rw-r--r--jstests/sharding/group_slaveok.js68
-rw-r--r--jstests/sharding/index1.js174
-rw-r--r--jstests/sharding/migrateBig.js2
-rw-r--r--jstests/sharding/migrateMemory.js54
-rw-r--r--jstests/sharding/multi_mongos1.js3
-rw-r--r--jstests/sharding/multi_mongos2.js61
-rw-r--r--jstests/sharding/parallel.js38
-rw-r--r--jstests/sharding/shard3.js12
-rw-r--r--jstests/sharding/shard6.js3
-rw-r--r--jstests/sharding/shard_insert_getlasterror_w2.js3
-rw-r--r--jstests/sharding/shard_keycount.js45
-rw-r--r--jstests/sharding/sharding_with_keyfile.js69
-rwxr-xr-xjstests/sharding/sharding_with_keyfile.key3
-rw-r--r--jstests/sharding/sync6.js81
-rw-r--r--jstests/sharding/sync7.js63
-rw-r--r--jstests/shell1.js6
-rw-r--r--jstests/shellkillop.js126
-rw-r--r--jstests/shellspawn.js6
-rw-r--r--jstests/skip1.js15
-rw-r--r--jstests/slowNightly/background.js51
-rw-r--r--jstests/slowNightly/command_line_parsing.js12
-rw-r--r--jstests/slowNightly/dur_big_atomic_update.js17
-rw-r--r--jstests/slowNightly/dur_remove_old_journals.js27
-rw-r--r--jstests/slowNightly/geo_axis_aligned.js108
-rw-r--r--jstests/slowNightly/geo_mnypts.js51
-rw-r--r--jstests/slowNightly/geo_polygon.js53
-rw-r--r--jstests/slowNightly/index_check10.js133
-rw-r--r--jstests/slowNightly/index_check9.js2
-rw-r--r--jstests/slowNightly/replReads.js108
-rw-r--r--jstests/slowNightly/replsets_priority1.js173
-rw-r--r--jstests/slowNightly/sharding_balance1.js3
-rw-r--r--jstests/slowNightly/sharding_balance4.js8
-rw-r--r--jstests/slowNightly/sharding_migrateBigObject.js61
-rw-r--r--jstests/slowNightly/sharding_multiple_ns_rs.js49
-rw-r--r--jstests/slowNightly/sharding_passthrough.js16
-rw-r--r--jstests/slowNightly/sharding_rs1.js8
-rw-r--r--jstests/slowNightly/sharding_rs2.js22
-rw-r--r--jstests/slowNightly/sharding_rs_arb1.js40
-rw-r--r--jstests/slowNightly/sync6_slow.js82
-rw-r--r--jstests/slowWeekly/geo_full.js487
-rw-r--r--jstests/slowWeekly/geo_mnypts_plus_fields.js98
-rw-r--r--jstests/slowWeekly/query_yield2.js2
-rw-r--r--jstests/slowWeekly/repair2.js29
-rw-r--r--jstests/slowWeekly/update_yield1.js2
-rw-r--r--jstests/sort10.js48
-rw-r--r--jstests/sort2.js22
-rw-r--r--jstests/sort7.js25
-rw-r--r--jstests/sort8.js30
-rw-r--r--jstests/sort9.js26
-rw-r--r--jstests/sorta.js26
-rw-r--r--jstests/tool/csv1.js8
-rw-r--r--jstests/tool/csvexport1.js45
-rw-r--r--jstests/tool/csvexport2.js31
-rw-r--r--jstests/tool/csvimport1.js40
-rw-r--r--jstests/tool/data/a.tsv2
-rw-r--r--jstests/tool/data/csvimport1.csv8
-rw-r--r--jstests/tool/data/dumprestore6/foo.bsonbin0 -> 44 bytes
-rw-r--r--jstests/tool/data/dumprestore6/system.indexes.bsonbin0 -> 144 bytes
-rw-r--r--jstests/tool/dumprestore5.js36
-rw-r--r--jstests/tool/dumprestore6.js27
-rw-r--r--jstests/tool/exportimport1.js29
-rw-r--r--jstests/tool/tsv1.js32
-rw-r--r--jstests/type2.js19
-rw-r--r--jstests/type3.js68
-rw-r--r--jstests/unique2.js53
-rw-r--r--jstests/uniqueness.js13
-rw-r--r--jstests/update.js13
-rw-r--r--jstests/update_blank1.js12
-rw-r--r--jstests/update_invalid1.js6
-rw-r--r--jstests/updatea.js6
-rw-r--r--jstests/updatef.js24
-rw-r--r--jstests/updateg.js17
295 files changed, 10429 insertions, 2413 deletions
diff --git a/jstests/ageoutjournalfiles.js b/jstests/ageoutjournalfiles.js
new file mode 100644
index 0000000..3c12cd8
--- /dev/null
+++ b/jstests/ageoutjournalfiles.js
@@ -0,0 +1,16 @@
+if (false && db.serverStatus().dur) {
+
+ assert(db.serverStatus().dur.ageOutJournalFiles != false);
+
+ db.adminCommand({ setParameter: 1, ageOutJournalFiles: false });
+
+ assert(db.serverStatus().dur.ageOutJournalFiles == false);
+
+ db.adminCommand({ setParameter: 1, ageOutJournalFiles: true });
+
+ assert(db.serverStatus().dur.ageOutJournalFiles != false);
+
+}
+else {
+// print("dur is off");
+} \ No newline at end of file
diff --git a/jstests/and.js b/jstests/and.js
new file mode 100644
index 0000000..bd6dbcd
--- /dev/null
+++ b/jstests/and.js
@@ -0,0 +1,86 @@
+// Some tests for $and SERVER-1089
+
+t = db.jstests_and;
+t.drop();
+
+t.save( {a:[1,2]} );
+t.save( {a:'foo'} );
+
+function check() {
+ // $and must be an array
+ assert.throws( function() { t.find( {$and:4} ).toArray() } );
+ // $and array must not be empty
+ assert.throws( function() { t.find( {$and:[]} ).toArray() } );
+ // $and elements must be objects
+ assert.throws( function() { t.find( {$and:[4]} ).toArray() } );
+
+ // Check equality matching
+ assert.eq( 1, t.count( {$and:[{a:1}]} ) );
+ assert.eq( 1, t.count( {$and:[{a:1},{a:2}]} ) );
+ assert.eq( 0, t.count( {$and:[{a:1},{a:3}]} ) );
+ assert.eq( 0, t.count( {$and:[{a:1},{a:2},{a:3}]} ) );
+ assert.eq( 1, t.count( {$and:[{a:'foo'}]} ) );
+ assert.eq( 0, t.count( {$and:[{a:'foo'},{a:'g'}]} ) );
+
+ // Check $and with other fields
+ assert.eq( 1, t.count( {a:2,$and:[{a:1}]} ) );
+ assert.eq( 0, t.count( {a:0,$and:[{a:1}]} ) );
+ assert.eq( 0, t.count( {a:2,$and:[{a:0}]} ) );
+ assert.eq( 1, t.count( {a:1,$and:[{a:1}]} ) );
+
+ // Check recursive $and
+ assert.eq( 1, t.count( {a:2,$and:[{$and:[{a:1}]}]} ) );
+ assert.eq( 0, t.count( {a:0,$and:[{$and:[{a:1}]}]} ) );
+ assert.eq( 0, t.count( {a:2,$and:[{$and:[{a:0}]}]} ) );
+ assert.eq( 1, t.count( {a:1,$and:[{$and:[{a:1}]}]} ) );
+
+ assert.eq( 1, t.count( {$and:[{a:2},{$and:[{a:1}]}]} ) );
+ assert.eq( 0, t.count( {$and:[{a:0},{$and:[{a:1}]}]} ) );
+ assert.eq( 0, t.count( {$and:[{a:2},{$and:[{a:0}]}]} ) );
+ assert.eq( 1, t.count( {$and:[{a:1},{$and:[{a:1}]}]} ) );
+
+ // Some of these cases were more important with an alternative $and syntax
+ // that was rejected, but they're still valid checks.
+
+ // Check simple regex
+ assert.eq( 1, t.count( {$and:[{a:/foo/}]} ) );
+ // Check multiple regexes
+ assert.eq( 1, t.count( {$and:[{a:/foo/},{a:/^f/},{a:/o/}]} ) );
+ assert.eq( 0, t.count( {$and:[{a:/foo/},{a:/^g/}]} ) );
+ assert.eq( 1, t.count( {$and:[{a:/^f/},{a:'foo'}]} ) );
+ // Check regex flags
+ assert.eq( 0, t.count( {$and:[{a:/^F/},{a:'foo'}]} ) );
+ assert.eq( 1, t.count( {$and:[{a:/^F/i},{a:'foo'}]} ) );
+
+
+
+ // Check operator
+ assert.eq( 1, t.count( {$and:[{a:{$gt:0}}]} ) );
+
+ // Check where
+ assert.eq( 1, t.count( {a:'foo',$where:'this.a=="foo"'} ) );
+ assert.eq( 1, t.count( {$and:[{a:'foo'}],$where:'this.a=="foo"'} ) );
+ assert.eq( 1, t.count( {$and:[{a:'foo'}],$where:'this.a=="foo"'} ) );
+
+ // Nested where ok
+ assert.eq( 1, t.count({$and:[{$where:'this.a=="foo"'}]}) );
+ assert.eq( 1, t.count({$and:[{a:'foo'},{$where:'this.a=="foo"'}]}) );
+ assert.eq( 1, t.count({$and:[{$where:'this.a=="foo"'}],$where:'this.a=="foo"'}) );
+}
+
+check();
+t.ensureIndex( {a:1} );
+check();
+var e = t.find( {$and:[{a:1}]} ).explain();
+assert.eq( 'BtreeCursor a_1', e.cursor );
+assert.eq( [[1,1]], e.indexBounds.a );
+
+function checkBounds( query ) {
+ var e = t.find( query ).explain();
+ assert.eq( 1, e.n );
+ assert.eq( [[1,1]], e.indexBounds.a );
+}
+
+// Since this is a multikey index, we get the bounds from the first constraint scanned.
+checkBounds( {a:1,$and:[{a:2}]} );
+checkBounds( {$and:[{a:1},{a:2}]} );
diff --git a/jstests/and2.js b/jstests/and2.js
new file mode 100644
index 0000000..0bd13eb
--- /dev/null
+++ b/jstests/and2.js
@@ -0,0 +1,27 @@
+// Test dollar sign operator with $and SERVER-1089
+
+t = db.jstests_and2;
+
+t.drop();
+t.save( {a:[1,2]} );
+t.update( {a:1}, {$set:{'a.$':5}} );
+assert.eq( [5,2], t.findOne().a );
+
+t.drop();
+t.save( {a:[1,2]} );
+t.update( {$and:[{a:1}]}, {$set:{'a.$':5}} );
+assert.eq( [5,2], t.findOne().a );
+
+// Make sure dollar sign operator with $and is consistent with no $and case
+t.drop();
+t.save( {a:[1,2],b:[3,4]} );
+t.update( {a:1,b:4}, {$set:{'a.$':5}} );
+// Probably not what we want here, just trying to make sure $and is consistent
+assert.eq( {a:[1,5],b:[3,4]}, t.find( {}, {_id:0} ).toArray()[ 0 ] );
+
+// Make sure dollar sign operator with $and is consistent with no $and case
+t.drop();
+t.save( {a:[1,2],b:[3,4]} );
+t.update( {a:1,$and:[{b:4}]}, {$set:{'a.$':5}} );
+// Probably not what we want here, just trying to make sure $and is consistent
+assert.eq( {a:[1,5],b:[3,4]}, t.find( {}, {_id:0} ).toArray()[ 0 ] );
diff --git a/jstests/and3.js b/jstests/and3.js
new file mode 100644
index 0000000..98a0974
--- /dev/null
+++ b/jstests/and3.js
@@ -0,0 +1,66 @@
+// Check key match with sub matchers - part of SERVER-3192
+
+t = db.jstests_and3;
+t.drop();
+
+t.save( {a:1} );
+t.save( {a:'foo'} );
+
+t.ensureIndex( {a:1} );
+
+function checkScanMatch( query, nscannedObjects, n ) {
+ var e = t.find( query ).hint( {a:1} ).explain();
+ // NOTE The nscannedObjects values aren't necessarily optimal currently,
+ // we're just checking current behavior here.
+ assert.eq( nscannedObjects, e.nscannedObjects );
+ assert.eq( n, e.n );
+}
+
+checkScanMatch( {a:/o/}, 1, 1 );
+checkScanMatch( {a:/a/}, 0, 0 );
+checkScanMatch( {a:{$not:/o/}}, 2, 1 );
+checkScanMatch( {a:{$not:/a/}}, 2, 2 );
+
+checkScanMatch( {$and:[{a:/o/}]}, 1, 1 );
+checkScanMatch( {$and:[{a:/a/}]}, 0, 0 );
+checkScanMatch( {$and:[{a:{$not:/o/}}]}, 2, 1 );
+checkScanMatch( {$and:[{a:{$not:/a/}}]}, 2, 2 );
+checkScanMatch( {$and:[{a:/o/},{a:{$not:/o/}}]}, 1, 0 );
+checkScanMatch( {$and:[{a:/o/},{a:{$not:/a/}}]}, 1, 1 );
+checkScanMatch( {$or:[{a:/o/}]}, 1, 1 );
+checkScanMatch( {$or:[{a:/a/}]}, 0, 0 );
+checkScanMatch( {$nor:[{a:/o/}]}, 2, 1 );
+checkScanMatch( {$nor:[{a:/a/}]}, 2, 2 );
+
+checkScanMatch( {$and:[{$and:[{a:/o/}]}]}, 1, 1 );
+checkScanMatch( {$and:[{$and:[{a:/a/}]}]}, 0, 0 );
+checkScanMatch( {$and:[{$and:[{a:{$not:/o/}}]}]}, 2, 1 );
+checkScanMatch( {$and:[{$and:[{a:{$not:/a/}}]}]}, 2, 2 );
+checkScanMatch( {$and:[{$or:[{a:/o/}]}]}, 1, 1 );
+checkScanMatch( {$and:[{$or:[{a:/a/}]}]}, 0, 0 );
+checkScanMatch( {$or:[{a:{$not:/o/}}]}, 2, 1 );
+checkScanMatch( {$and:[{$or:[{a:{$not:/o/}}]}]}, 2, 1 );
+checkScanMatch( {$and:[{$or:[{a:{$not:/a/}}]}]}, 2, 2 );
+checkScanMatch( {$and:[{$nor:[{a:/o/}]}]}, 2, 1 );
+checkScanMatch( {$and:[{$nor:[{a:/a/}]}]}, 2, 2 );
+
+checkScanMatch( {$where:'this.a==1'}, 2, 1 );
+checkScanMatch( {$and:[{$where:'this.a==1'}]}, 2, 1 );
+
+checkScanMatch( {a:1,$where:'this.a==1'}, 1, 1 );
+checkScanMatch( {a:1,$and:[{$where:'this.a==1'}]}, 1, 1 );
+checkScanMatch( {$and:[{a:1},{$where:'this.a==1'}]}, 1, 1 );
+checkScanMatch( {$and:[{a:1,$where:'this.a==1'}]}, 1, 1 );
+checkScanMatch( {a:1,$and:[{a:1},{a:1,$where:'this.a==1'}]}, 1, 1 );
+
+function checkImpossibleMatch( query ) {
+ var e = t.find( query ).explain();
+ assert.eq( 0, e.n );
+ assert.eq( 'BasicCursor', e.cursor );
+}
+
+// With a single key index, all bounds are utilized.
+assert.eq( [[1,1]], t.find( {$and:[{a:1}]} ).explain().indexBounds.a );
+assert.eq( [[1,1]], t.find( {a:1,$and:[{a:1}]} ).explain().indexBounds.a );
+checkImpossibleMatch( {a:1,$and:[{a:2}]} );
+checkImpossibleMatch( {$and:[{a:1},{a:2}]} );
diff --git a/jstests/andor.js b/jstests/andor.js
new file mode 100644
index 0000000..fae6ee4
--- /dev/null
+++ b/jstests/andor.js
@@ -0,0 +1,105 @@
+// SERVER-1089 Test and/or nesting
+
+t = db.jstests_andor;
+t.drop();
+
+// not ok
+function ok( q ) {
+ assert.eq( 1, t.find( q ).itcount() );
+}
+
+t.save( {a:1} );
+
+test = function() {
+
+ ok( {a:1} );
+
+ ok( {$and:[{a:1}]} );
+ ok( {$or:[{a:1}]} );
+
+ ok( {$and:[{$and:[{a:1}]}]} );
+ ok( {$or:[{$or:[{a:1}]}]} );
+
+ ok( {$and:[{$or:[{a:1}]}]} );
+ ok( {$or:[{$and:[{a:1}]}]} );
+
+ ok( {$and:[{$and:[{$or:[{a:1}]}]}]} );
+ ok( {$and:[{$or:[{$and:[{a:1}]}]}]} );
+ ok( {$or:[{$and:[{$and:[{a:1}]}]}]} );
+
+ ok( {$or:[{$and:[{$or:[{a:1}]}]}]} );
+
+ // now test $nor
+
+ ok( {$and:[{a:1}]} );
+ ok( {$nor:[{a:2}]} );
+
+ ok( {$and:[{$and:[{a:1}]}]} );
+ ok( {$nor:[{$nor:[{a:1}]}]} );
+
+ ok( {$and:[{$nor:[{a:2}]}]} );
+ ok( {$nor:[{$and:[{a:2}]}]} );
+
+ ok( {$and:[{$and:[{$nor:[{a:2}]}]}]} );
+ ok( {$and:[{$nor:[{$and:[{a:2}]}]}]} );
+ ok( {$nor:[{$and:[{$and:[{a:2}]}]}]} );
+
+ ok( {$nor:[{$and:[{$nor:[{a:1}]}]}]} );
+
+}
+
+test();
+t.ensureIndex( {a:1} );
+test();
+
+// Test an inequality base match.
+
+test = function() {
+
+ ok( {a:{$ne:2}} );
+
+ ok( {$and:[{a:{$ne:2}}]} );
+ ok( {$or:[{a:{$ne:2}}]} );
+
+ ok( {$and:[{$and:[{a:{$ne:2}}]}]} );
+ ok( {$or:[{$or:[{a:{$ne:2}}]}]} );
+
+ ok( {$and:[{$or:[{a:{$ne:2}}]}]} );
+ ok( {$or:[{$and:[{a:{$ne:2}}]}]} );
+
+ ok( {$and:[{$and:[{$or:[{a:{$ne:2}}]}]}]} );
+ ok( {$and:[{$or:[{$and:[{a:{$ne:2}}]}]}]} );
+ ok( {$or:[{$and:[{$and:[{a:{$ne:2}}]}]}]} );
+
+ ok( {$or:[{$and:[{$or:[{a:{$ne:2}}]}]}]} );
+
+ // now test $nor
+
+ ok( {$and:[{a:{$ne:2}}]} );
+ ok( {$nor:[{a:{$ne:1}}]} );
+
+ ok( {$and:[{$and:[{a:{$ne:2}}]}]} );
+ ok( {$nor:[{$nor:[{a:{$ne:2}}]}]} );
+
+ ok( {$and:[{$nor:[{a:{$ne:1}}]}]} );
+ ok( {$nor:[{$and:[{a:{$ne:1}}]}]} );
+
+ ok( {$and:[{$and:[{$nor:[{a:{$ne:1}}]}]}]} );
+ ok( {$and:[{$nor:[{$and:[{a:{$ne:1}}]}]}]} );
+ ok( {$nor:[{$and:[{$and:[{a:{$ne:1}}]}]}]} );
+
+ ok( {$nor:[{$and:[{$nor:[{a:{$ne:2}}]}]}]} );
+
+}
+
+t.drop();
+t.save( {a:1} );
+test();
+t.ensureIndex( {a:1} );
+test();
+
+t.drop();
+t.ensureIndex( {a:1} );
+var e = t.find( {$and:[{a:1}]} ).explain();
+// nested $or clauses currently ignored for indexing
+assert.eq( e.indexBounds, t.find( {$and:[{a:1,$or:[{a:2}]}]} ).explain().indexBounds );
diff --git a/jstests/apitest_dbcollection.js b/jstests/apitest_dbcollection.js
index f6e74da..0983b06 100644
--- a/jstests/apitest_dbcollection.js
+++ b/jstests/apitest_dbcollection.js
@@ -55,7 +55,7 @@ if( v.ns != "test.test_db" ) {
assert (v.ns == "test.test_db",9);
assert (v.ok == 1,10);
-assert(v.result.toString().match(/nrecords\?:(\d+)/)[1] == 100,11);
+assert.eq(100,v.nrecords,11)
/*
* test deleteIndex, deleteIndexes
diff --git a/jstests/array_match2.js b/jstests/array_match2.js
new file mode 100644
index 0000000..d64ca1b
--- /dev/null
+++ b/jstests/array_match2.js
@@ -0,0 +1,25 @@
+// Different recursive array match cases SERVER-2898
+
+t = db.jstests_array_match2;
+t.drop();
+
+t.save( {a:[{1:4},5]} );
+// When the array index is the last field, both of these match types work.
+assert.eq( 1, t.count( {'a.1':4} ) );
+assert.eq( 1, t.count( {'a.1':5} ) );
+
+t.remove();
+// When the array index is not the last field, only one of the match types works.
+t.save( {a:[{1:{foo:4}},{foo:5}]} );
+if ( 0 ) { // SERVER-2898
+assert.eq( 1, t.count( {'a.1.foo':4} ) );
+}
+assert.eq( 1, t.count( {'a.1.foo':5} ) );
+
+// Same issue with the $exists operator
+t.remove();
+t.save( {a:[{1:{foo:4}},{}]} );
+assert.eq( 1, t.count( {'a.1':{$exists:true}} ) );
+if ( 0 ) { // SERVER-2898
+assert.eq( 1, t.count( {'a.1.foo':{$exists:true}} ) );
+}
diff --git a/jstests/array_match3.js b/jstests/array_match3.js
new file mode 100644
index 0000000..c865343
--- /dev/null
+++ b/jstests/array_match3.js
@@ -0,0 +1,13 @@
+// SERVER-2902 Test indexing of numerically referenced array elements.
+
+t = db.jstests_array_match3;
+t.drop();
+
+// Test matching numericallly referenced array element.
+t.save( {a:{'0':5}} );
+t.save( {a:[5]} );
+assert.eq( 2, t.count( {'a.0':5} ) );
+
+// Test with index.
+t.ensureIndex( {'a.0':1} );
+assert.eq( 2, t.count( {'a.0':5} ) );
diff --git a/jstests/arrayfind2.js b/jstests/arrayfind2.js
index 94d77f1..1e63bf6 100644
--- a/jstests/arrayfind2.js
+++ b/jstests/arrayfind2.js
@@ -32,4 +32,5 @@ assert.eq( {"a.x":[[3,3]]}, t.find( { a : { $all : [ { $elemMatch : { x : 3 } },
t.ensureIndex( { "a.x":1,"a.y":-1 } );
-assert.eq( {"a.x":[[3,3]],"a.y":[[1.7976931348623157e+308,4]]}, t.find( { a : { $all : [ { $elemMatch : { x : 3, y : { $gt: 4 } } } ] } } ).explain().indexBounds );
+// TODO Index bounds below for elemMatch could be improved. - SERVER-3104
+assert.eq( {"a.x":[[3,3]],"a.y":[[{$maxElement:1},{$minElement:1}]]}, t.find( { a : { $all : [ { $elemMatch : { x : 3, y : { $gt: 4 } } } ] } } ).explain().indexBounds );
diff --git a/jstests/arrayfind4.js b/jstests/arrayfind4.js
new file mode 100644
index 0000000..b141425
--- /dev/null
+++ b/jstests/arrayfind4.js
@@ -0,0 +1,22 @@
+// Test query empty array SERVER-2258
+
+t = db.jstests_arrayfind4;
+t.drop();
+
+t.save( {a:[]} );
+t.ensureIndex( {a:1} );
+
+assert.eq( 1, t.find( {a:[]} ).hint( {$natural:1} ).itcount() );
+assert.eq( 1, t.find( {a:[]} ).hint( {a:1} ).itcount() );
+
+assert.eq( 1, t.find( {a:{$in:[[]]}} ).hint( {$natural:1} ).itcount() );
+assert.eq( 1, t.find( {a:{$in:[[]]}} ).hint( {a:1} ).itcount() );
+
+t.remove();
+t.save( {a:[[]]} );
+
+assert.eq( 1, t.find( {a:[]} ).hint( {$natural:1} ).itcount() );
+assert.eq( 1, t.find( {a:[]} ).hint( {a:1} ).itcount() );
+
+assert.eq( 1, t.find( {a:{$in:[[]]}} ).hint( {$natural:1} ).itcount() );
+assert.eq( 1, t.find( {a:{$in:[[]]}} ).hint( {a:1} ).itcount() );
diff --git a/jstests/arrayfind5.js b/jstests/arrayfind5.js
new file mode 100644
index 0000000..083dc06
--- /dev/null
+++ b/jstests/arrayfind5.js
@@ -0,0 +1,23 @@
+// Test indexed elemmatch of missing field.
+
+t = db.jstests_arrayfind5;
+t.drop();
+
+function check( nullElemMatch ) {
+ assert.eq( 1, t.find( {'a.b':1} ).itcount() );
+ assert.eq( 1, t.find( {a:{$elemMatch:{b:1}}} ).itcount() );
+ assert.eq( 0, t.find( {'a.b':null} ).itcount() );
+ assert.eq( nullElemMatch ? 1 : 0, t.find( {a:{$elemMatch:{b:null}}} ).itcount() ); // see SERVER-3377
+}
+
+t.save( {a:[{},{b:1}]} );
+check( true );
+t.ensureIndex( {'a.b':1} );
+check( true );
+
+t.drop();
+
+t.save( {a:[5,{b:1}]} );
+check( false );
+t.ensureIndex( {'a.b':1} );
+check( false );
diff --git a/jstests/auth/auth1.js b/jstests/auth/auth1.js
index 2f2a1b4..c837085 100644
--- a/jstests/auth/auth1.js
+++ b/jstests/auth/auth1.js
@@ -26,6 +26,11 @@ for( i = 0; i < 999; ++i ) {
assert.eq( 999, t.count() , "A1" );
assert.eq( 999, t.find().toArray().length , "A2" );
+db.setProfilingLevel( 2 );
+t.count();
+db.setProfilingLevel( 0 );
+assert.lt( 0 , db.system.profile.find( { user : "eliot" } ).count() , "AP1" )
+
assert.eq( 999, db.eval( function() { return db[ "jstests_auth_auth1" ].count(); } ) , "A3" );
db.eval( function() { db[ "jstests_auth_auth1" ].save( {i:999} ) } );
assert.eq( 1000, db.eval( function() { return db[ "jstests_auth_auth1" ].count(); } ) , "A4" );
diff --git a/jstests/auth/auth2.js b/jstests/auth/auth2.js
new file mode 100644
index 0000000..4f30894
--- /dev/null
+++ b/jstests/auth/auth2.js
@@ -0,0 +1,23 @@
+// test read/write permissions
+
+port = allocatePorts( 1 )[ 0 ];
+baseName = "jstests_auth_auth2";
+
+m = startMongod( "--auth", "--port", port, "--dbpath", "/data/db/" + baseName, "--nohttpinterface", "--bind_ip", "127.0.0.1" , "--nojournal" , "--smallfiles" );
+db = m.getDB( "admin" );
+
+t = db[ baseName ];
+t.drop();
+
+users = db.getCollection( "system.users" );
+assert.eq( 0 , users.count() );
+
+db.addUser( "eliot" , "eliot" );
+
+assert.throws( function(){ db.users.count(); } )
+
+assert.throws( function() { db.shutdownServer(); } )
+
+db.auth( "eliot" , "eliot" )
+
+db.shutdownServer();
diff --git a/jstests/auth/rename.js b/jstests/auth/rename.js
new file mode 100644
index 0000000..5411298
--- /dev/null
+++ b/jstests/auth/rename.js
@@ -0,0 +1,40 @@
+// test renameCollection with auth
+
+port = allocatePorts( 1 )[ 0 ];
+
+baseName = "jstests_rename_auth";
+m = startMongod( "--auth", "--port", port, "--dbpath", "/data/db/" + baseName, "--nohttpinterface" );
+
+db1 = m.getDB( baseName )
+db2 = m.getDB( baseName + '_other' )
+admin = m.getDB( 'admin' )
+
+// auth not yet checked since we are on localhost
+db1.addUser( "foo", "bar" );
+db2.addUser( "bar", "foo" );
+
+printjson(db1.a.count());
+db1.a.save({});
+assert.eq(db1.a.count(), 1);
+
+//this makes auth required on localhost
+admin.addUser('not', 'used');
+
+// can't run same db w/o auth
+assert.commandFailed( admin.runCommand({renameCollection:db1.a.getFullName(), to: db1.b.getFullName()}) );
+
+// can run same db with auth
+db1.auth('foo', 'bar')
+assert.commandWorked( admin.runCommand({renameCollection:db1.a.getFullName(), to: db1.b.getFullName()}) );
+
+// can't run diff db w/o auth
+assert.commandFailed( admin.runCommand({renameCollection:db1.b.getFullName(), to: db2.a.getFullName()}) );
+
+// can run diff db with auth
+db2.auth('bar', 'foo');
+assert.commandWorked( admin.runCommand({renameCollection:db1.b.getFullName(), to: db2.a.getFullName()}) );
+
+// test post conditions
+assert.eq(db1.a.count(), 0);
+assert.eq(db1.b.count(), 0);
+assert.eq(db2.a.count(), 1);
diff --git a/jstests/auth1.js b/jstests/auth1.js
index ce0159b..a2cc48a 100644
--- a/jstests/auth1.js
+++ b/jstests/auth1.js
@@ -38,3 +38,20 @@ pass = "a" + Math.random();
db2.addUser( "eliot" , pass );
assert.commandFailed( db2.runCommand( { authenticate: 1, user: "eliot", nonce: "foo", key: "bar" } ) );
+
+// check sanity check SERVER-3003
+
+before = db2.system.users.count()
+
+assert.throws( function(){
+ db2.addUser( "" , "abc" )
+} , null , "C1" )
+
+assert.throws( function(){
+ db2.addUser( "abc" , "" )
+} , null , "C2" )
+
+
+after = db2.system.users.count()
+assert( before > 0 , "C3" )
+assert.eq( before , after , "C4" )
diff --git a/jstests/auth2.js b/jstests/auth2.js
index 9b6dfad..9c2b38f 100644
--- a/jstests/auth2.js
+++ b/jstests/auth2.js
@@ -2,4 +2,8 @@
// SERVER-724
db.runCommand({logout : 1});
-db.runCommand({logout : 1});
+x = db.runCommand({logout : 1});
+assert.eq( 1 , x.ok , "A" )
+
+x = db.logout();
+assert.eq( 1 , x.ok , "B" )
diff --git a/jstests/bench_test1.js b/jstests/bench_test1.js
new file mode 100644
index 0000000..c32b37d
--- /dev/null
+++ b/jstests/bench_test1.js
@@ -0,0 +1,16 @@
+
+t = db.bench_test1;
+t.drop();
+
+t.insert( { _id : 1 , x : 1 } )
+t.insert( { _id : 2 , x : 1 } )
+
+ops = [
+ { op : "findOne" , ns : t.getFullName() , query : { _id : 1 } } ,
+ { op : "update" , ns : t.getFullName() , query : { _id : 1 } , update : { $inc : { x : 1 } } }
+]
+
+seconds = .7
+
+res = benchRun( { ops : ops , parallel : 2 , seconds : seconds , host : db.getMongo().host } )
+assert.lte( seconds * res.update , t.findOne( { _id : 1 } ).x , "A1" )
diff --git a/jstests/bench_test2.js b/jstests/bench_test2.js
new file mode 100644
index 0000000..4a69c9c
--- /dev/null
+++ b/jstests/bench_test2.js
@@ -0,0 +1,41 @@
+
+t = db.bench_test2
+t.drop();
+
+for ( i=0; i<100; i++ )
+ t.insert( { _id : i , x : 0 } );
+db.getLastError();
+
+res = benchRun( { ops : [ { ns : t.getFullName() ,
+ op : "update" ,
+ query : { _id : { "#RAND_INT" : [ 0 , 100 ] } } ,
+ update : { $inc : { x : 1 } } } ] ,
+ parallel : 2 ,
+ seconds : 1 ,
+ totals : true ,
+ host : db.getMongo().host } )
+printjson( res );
+
+sumsq = 0
+sum = 0
+
+min = 1000
+max = 0;
+t.find().forEach(
+ function(z){
+ sum += z.x;
+ sumsq += Math.pow( ( res.update / 100 ) - z.x , 2 );
+ min = Math.min( z.x , min );
+ max = Math.max( z.x , max );
+ }
+)
+
+avg = sum / 100
+std = Math.sqrt( sumsq / 100 )
+
+print( "Avg: " + avg )
+print( "Std: " + std )
+print( "Min: " + min )
+print( "Max: " + max )
+
+
diff --git a/jstests/big_object1.js b/jstests/big_object1.js
index be841e0..6bbe115 100644
--- a/jstests/big_object1.js
+++ b/jstests/big_object1.js
@@ -44,3 +44,5 @@ if ( db.adminCommand( "buildinfo" ).bits == 64 ){
else {
print( "skipping big_object1 b/c not 64-bit" )
}
+
+print("SUCCESS");
diff --git a/jstests/binData.js b/jstests/binData.js
new file mode 100644
index 0000000..3f03765
--- /dev/null
+++ b/jstests/binData.js
@@ -0,0 +1,14 @@
+
+var x = new BinData(3, "OEJTfmD8twzaj/LPKLIVkA==");
+assert.eq(x.hex(), "3842537e60fcb70cda8ff2cf28b21590", "bad hex");
+assert.eq(x.base64(), "OEJTfmD8twzaj/LPKLIVkA==", "bad base64");
+assert.eq(x.type, 3, "bad type");
+assert.eq(x.length(), 16, "bad length");
+
+x = new BinData(0, "TWFuIGlzIGRpc3Rpbmd1aXNoZWQsIG5vdCBvbmx5IGJ5IGhpcyByZWFzb24sIGJ1dCBieSB0aGlzIHNpbmd1bGFyIHBhc3Npb24gZnJvbSBvdGhlciBhbmltYWxzLCB3aGljaCBpcyBhIGx1c3Qgb2YgdGhlIG1pbmQsIHRoYXQgYnkgYSBwZXJzZXZlcmFuY2Ugb2YgZGVsaWdodCBpbiB0aGUgY29udGludWVkIGFuZCBpbmRlZmF0aWdhYmxlIGdlbmVyYXRpb24gb2Yga25vd2xlZGdlLCBleGNlZWRzIHRoZSBzaG9ydCB2ZWhlbWVuY2Ugb2YgYW55IGNhcm5hbCBwbGVhc3VyZS4=");
+assert.eq(x.hex(), "4d616e2069732064697374696e677569736865642c206e6f74206f6e6c792062792068697320726561736f6e2c2062757420627920746869732073696e67756c61722070617373696f6e2066726f6d206f7468657220616e696d616c732c2077686963682069732061206c757374206f6620746865206d696e642c20746861742062792061207065727365766572616e6365206f662064656c6967687420696e2074686520636f6e74696e75656420616e6420696e6465666174696761626c652067656e65726174696f6e206f66206b6e6f776c656467652c2065786365656473207468652073686f727420766568656d656e6365206f6620616e79206361726e616c20706c6561737572652e", "bad hex");
+assert.eq(x.base64(), "TWFuIGlzIGRpc3Rpbmd1aXNoZWQsIG5vdCBvbmx5IGJ5IGhpcyByZWFzb24sIGJ1dCBieSB0aGlzIHNpbmd1bGFyIHBhc3Npb24gZnJvbSBvdGhlciBhbmltYWxzLCB3aGljaCBpcyBhIGx1c3Qgb2YgdGhlIG1pbmQsIHRoYXQgYnkgYSBwZXJzZXZlcmFuY2Ugb2YgZGVsaWdodCBpbiB0aGUgY29udGludWVkIGFuZCBpbmRlZmF0aWdhYmxlIGdlbmVyYXRpb24gb2Yga25vd2xlZGdlLCBleGNlZWRzIHRoZSBzaG9ydCB2ZWhlbWVuY2Ugb2YgYW55IGNhcm5hbCBwbGVhc3VyZS4=", "bad base64");
+assert.eq(x.type, 0, "bad type");
+assert.eq(x.length(), 269, "bad length");
+
+
diff --git a/jstests/capped.js b/jstests/capped.js
index bae7472..6fdc4df 100644
--- a/jstests/capped.js
+++ b/jstests/capped.js
@@ -1,11 +1,11 @@
db.jstests_capped.drop();
db.createCollection("jstests_capped", {capped:true, size:30000});
-assert.eq( 0, db.system.indexes.find( {ns:"test.jstests_capped"} ).count() );
+
+assert.eq( 0, db.system.indexes.find( {ns:"test.jstests_capped"} ).count(), "expected a count of zero indexes for new capped collection" );
t = db.jstests_capped;
t.save({x:1});
t.save({x:2});
-assert( t.find().sort({$natural:1})[0].x == 1 );
-assert( t.find().sort({$natural:-1})[0].x == 2 );
-
+assert( t.find().sort({$natural:1})[0].x == 1 , "expected obj.x==1");
+assert( t.find().sort({$natural:-1})[0].x == 2, "expected obj.x == 2");
diff --git a/jstests/capped2.js b/jstests/capped2.js
index 2d2f6a8..65bb82f 100644
--- a/jstests/capped2.js
+++ b/jstests/capped2.js
@@ -8,7 +8,7 @@ function debug( x ) {
var val = new Array( 2000 );
var c = "";
-for( i = 0; i < 2000; ++i, c += "-" ) {
+for( i = 0; i < 2000; ++i, c += "---" ) { // bigger and bigger objects through the array...
val[ i ] = { a: c };
}
@@ -47,16 +47,16 @@ function checkDecreasing( i ) {
for( i = 0 ;; ++i ) {
debug( "capped 2: " + i );
- tzz.save( val[ i ] );
+ tzz.insert( val[ i ] );
if ( tzz.count() == 0 ) {
- assert( i > 100, "K" );
- break;
+ assert( i > 100, "K" );
+ break;
}
checkIncreasing( i );
}
for( i = 600 ; i >= 0 ; --i ) {
debug( "capped 2: " + i );
- tzz.save( val[ i ] );
+ tzz.insert( val[ i ] );
checkDecreasing( i );
}
diff --git a/jstests/capped5.js b/jstests/capped5.js
index 1c7ec3d..be6c27d 100644
--- a/jstests/capped5.js
+++ b/jstests/capped5.js
@@ -4,12 +4,11 @@ tn = "capped5"
t = db[tn]
t.drop();
+
db.createCollection( tn , {capped: true, size: 1024 * 1024 * 1 } );
t.insert( { _id : 5 , x : 11 , z : 52 } );
-
assert.eq( 0 , t.getIndexKeys().length , "A0" )
assert.eq( 52 , t.findOne( { x : 11 } ).z , "A1" );
-assert.eq( 52 , t.findOne( { _id : 5 } ).z , "A2" );
t.ensureIndex( { _id : 1 } )
t.ensureIndex( { x : 1 } )
@@ -41,10 +40,10 @@ t.ensureIndex( { x:1 }, {unique:true, dropDups:true } );
assert.eq( 1, db.system.indexes.count( {ns:"test."+tn} ) );
assert.eq( 2, t.find().hint( {x:1} ).toArray().length );
-// SERVER-525
+// SERVER-525 (closed) unique indexes in capped collection
t.drop();
db.createCollection( tn , {capped: true, size: 1024 * 1024 * 1 } );
-t.ensureIndex( { _id:1 } );
+t.ensureIndex( { _id:1 } ); // note we assume will be automatically unique because it is _id
t.insert( { _id : 5 , x : 11 } );
t.insert( { _id : 5 , x : 12 } );
assert.eq( 1, t.find().toArray().length );
diff --git a/jstests/capped6.js b/jstests/capped6.js
index 6579807..098f667 100644
--- a/jstests/capped6.js
+++ b/jstests/capped6.js
@@ -52,7 +52,7 @@ var max = 0;
*/
function doTest() {
for( var i = max; i < oldMax; ++i ) {
- tzz.save( val[ i ] );
+ tzz.insert( val[ i ] );
}
max = oldMax;
count = tzz.count();
diff --git a/jstests/capped8.js b/jstests/capped8.js
index cce0eec..e5b28dc 100644
--- a/jstests/capped8.js
+++ b/jstests/capped8.js
@@ -9,25 +9,39 @@ function debug( x ) {
}
/** Generate an object with a string field of specified length */
-function obj( size ) {
- return {a:new Array( size + 1 ).toString()};;
+function obj( size, x ) {
+ return {X:x, a:new Array( size + 1 ).toString()};;
}
function withinOne( a, b ) {
assert( Math.abs( a - b ) <= 1, "not within one: " + a + ", " + b )
}
+var X = 0;
+
/**
* Insert enough documents of the given size spec that the collection will
* contain only documents having this size spec.
*/
-function insertMany( size ) {
+function insertManyRollingOver( objsize ) {
// Add some variability, as the precise number can trigger different cases.
- n = 250 + Random.randInt( 10 );
+ X++;
+ n = 250 + Random.randInt(10);
+
+ assert(t.count() == 0 || t.findOne().X != X);
+
for( i = 0; i < n; ++i ) {
- t.save( obj( size ) );
+ t.save( obj( objsize, X ) );
debug( t.count() );
}
+
+ if (t.findOne().X != X) {
+ printjson(t.findOne());
+ print("\n\nERROR didn't roll over in insertManyRollingOver " + objsize);
+ print("approx amountwritten: " + (objsize * n));
+ printjson(t.stats());
+ assert(false);
+ }
}
/**
@@ -37,10 +51,10 @@ function insertMany( size ) {
function insertAndTruncate( first ) {
myInitialCount = t.count();
// Insert enough documents to make the capped allocation loop over.
- insertMany( 50 );
+ insertManyRollingOver( 150 );
myFiftyCount = t.count();
// Insert documents that are too big to fit in the smaller extents.
- insertMany( 2000 );
+ insertManyRollingOver( 5000 );
myTwokCount = t.count();
if ( first ) {
initialCount = myInitialCount;
@@ -69,18 +83,24 @@ function testTruncate() {
insertAndTruncate( false );
}
+var pass = 1;
+
+print("pass " + pass++);
t.drop();
-db._dbCommand( { create:"jstests_capped8", capped: true, $nExtents: [ 10000, 10000, 1000 ] } );
+db._dbCommand( { create:"jstests_capped8", capped: true, $nExtents: [ 10000, 10000, 4000 ] } );
testTruncate();
+print("pass " + pass++);
t.drop();
-db._dbCommand( { create:"jstests_capped8", capped: true, $nExtents: [ 10000, 1000, 1000 ] } );
+db._dbCommand( { create:"jstests_capped8", capped: true, $nExtents: [ 10000, 1000, 4000 ] } );
testTruncate();
+print("pass " + pass++);
t.drop();
-db._dbCommand( { create:"jstests_capped8", capped: true, $nExtents: [ 10000, 1000 ] } );
+db._dbCommand( { create:"jstests_capped8", capped: true, $nExtents: [ 10000, 4000 ] } );
testTruncate();
+print("pass " + pass++);
t.drop();
db._dbCommand( { create:"jstests_capped8", capped: true, $nExtents: [ 10000 ] } );
testTruncate();
diff --git a/jstests/capped9.js b/jstests/capped9.js
new file mode 100644
index 0000000..9ea506c
--- /dev/null
+++ b/jstests/capped9.js
@@ -0,0 +1,28 @@
+
+t = db.capped9;
+t.drop();
+
+db.createCollection("capped9" , {capped:true, size:1024*50 });
+
+t.insert( { _id : 1 , x : 2 , y : 3 } )
+
+assert.eq( 1 , t.find( { x : 2 } ).itcount() , "A1" )
+assert.eq( 1 , t.find( { y : 3 } ).itcount() , "A2" )
+//assert.throws( function(){ t.find( { _id : 1 } ).itcount(); } , [] , "A3" ); // SERVER-3064
+
+t.update( { _id : 1 } , { $set : { y : 4 } } )
+//assert( db.getLastError() , "B1" ); // SERVER-3064
+//assert.eq( 3 , t.findOne().y , "B2" ); // SERVER-3064
+
+t.ensureIndex( { _id : 1 } )
+
+assert.eq( 1 , t.find( { _id : 1 } ).itcount() , "D1" )
+
+t.update( { _id : 1 } , { $set : { y : 4 } } )
+assert( null == db.getLastError() , "D1: " + tojson( db.getLastError() ) )
+assert.eq( 4 , t.findOne().y , "D2" )
+
+
+
+
+
diff --git a/jstests/cappeda.js b/jstests/cappeda.js
new file mode 100644
index 0000000..4a4b14a
--- /dev/null
+++ b/jstests/cappeda.js
@@ -0,0 +1,33 @@
+
+t = db.scan_capped_id;
+t.drop()
+
+x = t.runCommand( "create" , { capped : true , size : 10000 } )
+assert( x.ok )
+
+for ( i=0; i<100; i++ )
+ t.insert( { _id : i , x : 1 } )
+
+function q() {
+ return t.findOne( { _id : 5 } )
+}
+
+function u() {
+ t.update( { _id : 5 } , { $set : { x : 2 } } );
+ var gle = db.getLastError();
+ if ( gle )
+ throw gle;
+}
+
+
+// SERVER-3064
+//assert.throws( q , [] , "A1" );
+//assert.throws( u , [] , "B1" );
+
+t.ensureIndex( { _id : 1 } )
+
+assert.eq( 1 , q().x )
+q()
+u()
+
+assert.eq( 2 , q().x )
diff --git a/jstests/compact.js b/jstests/compact.js
new file mode 100644
index 0000000..b12b03f
--- /dev/null
+++ b/jstests/compact.js
@@ -0,0 +1,37 @@
+// compact.js
+
+t = db.compacttest;
+t.drop();
+t.insert({ x: 3 });
+t.insert({ x: 3 });
+t.insert({ x: 5 });
+t.insert({ x: 4, z: 2, k: 'aaa' });
+t.insert({ x: 4, z: 2, k: 'aaa' });
+t.insert({ x: 4, z: 2, k: 'aaa' });
+t.insert({ x: 4, z: 2, k: 'aaa' });
+t.insert({ x: 4, z: 2, k: 'aaa' });
+t.insert({ x: 4, z: 2, k: 'aaa' });
+t.ensureIndex({ x: 1 });
+
+print("1");
+
+var res = db.runCommand({ compact: 'compacttest', dev: true });
+printjson(res);
+assert(res.ok);
+assert(t.count() == 9);
+var v = t.validate(true);
+assert(v.ok);
+assert(v.extentCount == 1);
+assert(v.deletedCount == 1);
+assert(t.getIndexes().length == 2);
+
+print("2");
+
+// works on an empty collection?
+t.remove({});
+assert(db.runCommand({ compact: 'compacttest', dev: true }).ok);
+assert(t.count() == 0);
+v = t.validate(true);
+assert(v.ok);
+assert(v.extentCount == 1);
+assert(t.getIndexes().length == 2);
diff --git a/jstests/compact_speed_test.js b/jstests/compact_speed_test.js
new file mode 100755
index 0000000..0c4b9d5
--- /dev/null
+++ b/jstests/compact_speed_test.js
@@ -0,0 +1,61 @@
+if (1) {
+
+ t = db.compactspeedtest;
+ t.drop();
+
+ var obj = { x: 1, y: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", z: [1, 2] };
+
+ var start = new Date();
+ function timed() {
+ db.getLastError();
+ var dt = (new Date()) - start;
+ //print("time: " + dt);
+ start = new Date();
+ return dt;
+ }
+
+ //print("adding data");
+ var N = 100000;
+ if (db.adminCommand("buildInfo").debug)
+ N = 10000;
+ for (var i = 0; i < N; i++) {
+ obj.x = i;
+ obj.z[1] = i;
+ t.insert(obj);
+ }
+ var a = timed();
+
+ //print("index");
+ t.ensureIndex({ x: 1 });
+ //print("index");
+ t.ensureIndex({ y: 1 });
+ //print("index");
+ t.ensureIndex({ z: 1 });
+
+ a += timed();
+
+ //print("count:" + t.count());
+
+ timed();
+
+ {
+ //print("compact");
+ var res = db.runCommand({ compact: 'compactspeedtest', dev: true });
+ b = timed();
+ //printjson(res);
+ assert(res.ok);
+
+ //print("validate");
+ var v = t.validate(true);
+
+ assert(v.ok);
+ assert(t.getIndexes().length == 4);
+
+ if (b < a) {
+ // consider making this fail/assert
+ print("\n\n\nwarning WARNING compact command was slower than it should be");
+ print("a:" + a + " b:" + b);
+ print("\n\n\n");
+ }
+ }
+}
diff --git a/jstests/date1.js b/jstests/date1.js
index ca2e616..e6fc147 100644
--- a/jstests/date1.js
+++ b/jstests/date1.js
@@ -4,11 +4,14 @@ t = db.date1;
function go( d , msg ){
t.drop();
- t.save( { a : 1 , d : d } );
+ t.save({ a: 1, d: d });
+// printjson(d);
+// printjson(t.findOne().d);
assert.eq( d , t.findOne().d , msg )
}
go( new Date() , "A" )
go( new Date( 1 ) , "B")
go( new Date( 0 ) , "C (old spidermonkey lib fails this test)")
+go(new Date(-10), "neg")
diff --git a/jstests/date2.js b/jstests/date2.js
new file mode 100644
index 0000000..94eb58e
--- /dev/null
+++ b/jstests/date2.js
@@ -0,0 +1,13 @@
+// Check that it's possible to compare a Date to a Timestamp - SERVER-3304
+// Check Date / Timestamp comparison equivalence - SERVER-3222
+
+t = db.jstests_date2;
+t.drop();
+
+t.ensureIndex( {a:1} );
+
+t.save( {a:new Timestamp()} );
+
+if ( 0 ) { // SERVER-3304
+assert.eq( 1, t.find( {a:{$gt:new Date(0)}} ).itcount() );
+} \ No newline at end of file
diff --git a/jstests/date3.js b/jstests/date3.js
new file mode 100644
index 0000000..e7ddf71
--- /dev/null
+++ b/jstests/date3.js
@@ -0,0 +1,31 @@
+// Check dates before Unix epoch - SERVER-405
+
+t = db.date3;
+t.drop()
+
+d1 = new Date(-1000)
+dz = new Date(0)
+d2 = new Date(1000)
+
+t.save( {x: 3, d: dz} )
+t.save( {x: 2, d: d2} )
+t.save( {x: 1, d: d1} )
+
+function test () {
+ var list = t.find( {d: {$lt: dz}} )
+ assert.eq ( 1, list.size() )
+ assert.eq ( 1, list[0].x )
+ assert.eq ( d1, list[0].d )
+ var list = t.find( {d: {$gt: dz}} )
+ assert.eq ( 1, list.size() )
+ assert.eq ( 2, list[0].x )
+ var list = t.find().sort( {d:1} )
+ assert.eq ( 3, list.size() )
+ assert.eq ( 1, list[0].x )
+ assert.eq ( 3, list[1].x )
+ assert.eq ( 2, list[2].x )
+}
+
+test()
+t.ensureIndex( {d: 1} )
+test()
diff --git a/jstests/dbcase.js b/jstests/dbcase.js
index 21854d8..25c0bca 100644
--- a/jstests/dbcase.js
+++ b/jstests/dbcase.js
@@ -1,6 +1,5 @@
+// Check db name duplication constraint SERVER-2111
-/*
-TODO SERVER-2111
a = db.getSisterDB( "dbcasetest_dbnamea" )
b = db.getSisterDB( "dbcasetest_dbnameA" )
@@ -15,11 +14,16 @@ b.foo.save( { x : 1 } )
z = db.getLastErrorObj();
assert.eq( 13297 , z.code || 0 , "B : " + tojson(z) )
-print( db.getMongo().getDBNames() )
+assert.neq( -1, db.getMongo().getDBNames().indexOf( a.getName() ) );
+assert.eq( -1, db.getMongo().getDBNames().indexOf( b.getName() ) );
+printjson( db.getMongo().getDBs().databases );
a.dropDatabase();
b.dropDatabase();
-print( db.getMongo().getDBNames() )
-*/
-
+ai = db.getMongo().getDBNames().indexOf( a.getName() );
+bi = db.getMongo().getDBNames().indexOf( b.getName() );
+// One of these dbs may exist if there is a slave active, but they must
+// not both exist.
+assert( ai == -1 || bi == -1 );
+printjson( db.getMongo().getDBs().databases );
diff --git a/jstests/dbcase2.js b/jstests/dbcase2.js
new file mode 100644
index 0000000..57e43bc
--- /dev/null
+++ b/jstests/dbcase2.js
@@ -0,0 +1,9 @@
+// SERVER-2111 Check that an in memory db name will block creation of a db with a similar but differently cased name.
+
+a = db.getSisterDB( "dbcasetest_dbnamea" )
+b = db.getSisterDB( "dbcasetest_dbnameA" )
+
+a.c.count();
+assert.throws( function() { b.c.count() } );
+
+assert.eq( -1, db.getMongo().getDBNames().indexOf( "dbcasetest_dbnameA" ) );
diff --git a/jstests/dbhash.js b/jstests/dbhash.js
index e9cbc94..7fea4b4 100644
--- a/jstests/dbhash.js
+++ b/jstests/dbhash.js
@@ -14,16 +14,22 @@ db.getCollectionNames().forEach( function( x ) {
}
} );
+function dbhash( mydb ) {
+ var ret = mydb.runCommand( "dbhash" );
+ assert.commandWorked( ret, "dbhash failure" );
+ return ret;
+}
+
function gh( coll , mydb ){
if ( ! mydb ) mydb = db;
- var x = mydb.runCommand( "dbhash" ).collections[coll.getName()];
+ var x = dbhash( mydb ).collections[coll.getName()];
if ( ! x )
return "";
return x;
}
function dbh( mydb ){
- return mydb.runCommand( "dbhash" ).md5;
+ return dbhash( mydb ).md5;
}
assert.eq( gh( a ) , gh( b ) , "A1" );
diff --git a/jstests/delx.js b/jstests/delx.js
index 3f8c88c..aa858e9 100644
--- a/jstests/delx.js
+++ b/jstests/delx.js
@@ -23,6 +23,7 @@ x.next();
y.next();
a.foo.remove( { _id : { $gt : 50 } } );
+db.getLastError();
assert.eq( 51 , a.foo.find().itcount() , "B1" )
assert.eq( 100 , b.foo.find().itcount() , "B2" )
diff --git a/jstests/disk/directoryperdb.js b/jstests/disk/directoryperdb.js
index 3b65bd0..c29dea0 100644
--- a/jstests/disk/directoryperdb.js
+++ b/jstests/disk/directoryperdb.js
@@ -22,6 +22,8 @@ checkDir = function( dir ) {
files = listFiles( dir + baseName );
for( f in files ) {
+ if ( files[f].isDirectory )
+ continue;
assert( new RegExp( baseName + "/" + baseName + "." ).test( files[ f ].name ) , "B dir:" + dir + " f: " + f );
}
}
diff --git a/jstests/disk/diskfull.js b/jstests/disk/diskfull.js
index 26b707d..eddb300 100644
--- a/jstests/disk/diskfull.js
+++ b/jstests/disk/diskfull.js
@@ -20,10 +20,16 @@ if ( doIt ) {
port = allocatePorts( 1 )[ 0 ];
m = startMongoProgram( "mongod", "--port", port, "--dbpath", dbpath, "--nohttpinterface", "--bind_ip", "127.0.0.1" );
- c = m.getDB( "diskfulltest" ).getCollection( "diskfulltest" )
+ d = m.getDB( "diskfulltest" );
+ c = d.getCollection( "diskfulltest" );
c.save( { a: 6 } );
+ assert.eq(d.getLastError(), "new file allocation failure"); // first fail
assert.soon( function() { return rawMongoProgramOutput().match( /file allocation failure/ ); }, "didn't see 'file allocation failure'" );
assert.isnull( c.findOne() , "shouldn't exist" );
+ c.save( { a: 6 } );
+ assert.eq(d.getLastError(), "Can't take a write lock while out of disk space"); // every following fail
+
+
sleep( 3000 );
m2 = new Mongo( m.host );
printjson( m2.getDBs() );
diff --git a/jstests/disk/newcollection.js b/jstests/disk/newcollection.js
index 944ad1c..57ae179 100644
--- a/jstests/disk/newcollection.js
+++ b/jstests/disk/newcollection.js
@@ -3,11 +3,21 @@
port = allocatePorts( 1 )[ 0 ]
var baseName = "jstests_disk_newcollection";
var m = startMongod( "--noprealloc", "--smallfiles", "--port", port, "--dbpath", "/data/db/" + baseName );
+//var m = db.getMongo();
db = m.getDB( "test" );
-db.createCollection( baseName, {size:15.9*1024*1024} );
-db.baseName.drop();
+var t = db[baseName];
-size = m.getDBs().totalSize;
-db.baseName.save( {} );
-assert.eq( size, m.getDBs().totalSize );
+for (var pass = 0; pass <= 1; pass++) {
+
+ db.createCollection(baseName, { size: 15.8 * 1024 * 1024 });
+ if( pass == 0 )
+ t.drop();
+
+ size = m.getDBs().totalSize;
+ t.save({});
+ assert.eq(size, m.getDBs().totalSize);
+ assert(size <= 32 * 1024 * 1024);
+
+ t.drop();
+}
diff --git a/jstests/disk/norepeat.js b/jstests/disk/norepeat.js
index d9f1cd3..985fc36 100644
--- a/jstests/disk/norepeat.js
+++ b/jstests/disk/norepeat.js
@@ -45,7 +45,7 @@ assert.throws( function() { c.next() }, [], "unexpected: object found" );
m.getDB( "local" ).getCollectionNames().forEach( function( x ) { assert( !x.match( /^temp/ ), "temp collection found" ); } );
t.drop();
-m.getDB( baseName ).createCollection( baseName, { capped:true, size:100000, autoIdIndex:false } );
+m.getDB( baseName ).createCollection( baseName, { capped:true, size:100000, autoIndexId:false } );
t = m.getDB( baseName ).getCollection( baseName );
t.insert( {_id:"a"} );
t.insert( {_id:"a"} );
diff --git a/jstests/disk/quota.js b/jstests/disk/quota.js
new file mode 100644
index 0000000..d93e5ea
--- /dev/null
+++ b/jstests/disk/quota.js
@@ -0,0 +1,47 @@
+// Check functioning of --quotaFiles parameter, including with respect to SERVER-3293 ('local' database).
+
+port = allocatePorts( 1 )[ 0 ];
+
+baseName = "jstests_disk_quota";
+dbpath = "/data/db/" + baseName;
+
+m = startMongod( "--port", port, "--dbpath", "/data/db/" + baseName, "--quotaFiles", "1", "--smallfiles" );
+db = m.getDB( baseName );
+
+big = new Array( 10000 ).toString();
+
+// Insert documents until quota is exhausted.
+while( !db.getLastError() ) {
+ db[ baseName ].save( {b:big} );
+}
+printjson( db.getLastError() );
+
+dotTwoDataFile = dbpath + "/" + baseName + ".2";
+files = listFiles( dbpath );
+for( i in files ) {
+ // Since only one data file is allowed, a .0 file is expected and a .1 file may be preallocated (SERVER-3410) but no .2 file is expected.
+ assert.neq( dotTwoDataFile, files[ i ].name );
+}
+
+dotTwoDataFile = dbpath + "/" + "local" + ".2";
+// Check that quota does not apply to local db, and a .2 file can be created.
+l = m.getDB( "local" )[ baseName ];
+for( i = 0; i < 10000; ++i ) {
+ l.save( {b:big} );
+ assert( !db.getLastError() );
+ dotTwoFound = false;
+ if ( i % 100 != 0 ) {
+ continue;
+ }
+ files = listFiles( dbpath );
+ for( f in files ) {
+ if ( files[ f ].name == dotTwoDataFile ) {
+ dotTwoFound = true;
+ }
+ }
+ if ( dotTwoFound ) {
+ break;
+ }
+}
+
+assert( dotTwoFound );
diff --git a/jstests/disk/quota2.js b/jstests/disk/quota2.js
new file mode 100644
index 0000000..c0d30df
--- /dev/null
+++ b/jstests/disk/quota2.js
@@ -0,0 +1,38 @@
+// Test for quotaFiles off by one file limit issue - SERVER-3420.
+
+if ( 0 ) { // SERVER-3420
+
+port = allocatePorts( 1 )[ 0 ];
+
+baseName = "jstests_disk_quota2";
+dbpath = "/data/db/" + baseName;
+
+m = startMongod( "--port", port, "--dbpath", "/data/db/" + baseName, "--quotaFiles", "1", "--smallfiles" );
+db = m.getDB( baseName );
+
+big = new Array( 10000 ).toString();
+
+// Insert documents until quota is exhausted.
+while( !db.getLastError() ) {
+ db[ baseName ].save( {b:big} );
+}
+
+db.resetError();
+
+// Trigger allocation of an additional file for a 'special' namespace.
+for( n = 0; !db.getLastError(); ++n ) {
+ db.createCollection( '' + n );
+}
+
+print( n );
+
+// Check that new docs are saved in the .0 file.
+for( i = 0; i < n; ++i ) {
+ c = db[ ''+i ];
+ c.save( {b:big} );
+ if( !db.getLastError() ) {
+ assert.eq( 0, c.find()._addSpecial( "$showDiskLoc", true )[ 0 ].$diskLoc.file );
+ }
+}
+
+} \ No newline at end of file
diff --git a/jstests/disk/repair3.js b/jstests/disk/repair3.js
index c986dce..9e6767c 100644
--- a/jstests/disk/repair3.js
+++ b/jstests/disk/repair3.js
@@ -1,4 +1,4 @@
-// test --repairpath on aother partition
+// test --repairpath on another partition
var baseName = "jstests_disk_repair3";
var repairbase = "/data/db/repairpartitiontest"
diff --git a/jstests/disk/repair5.js b/jstests/disk/repair5.js
new file mode 100644
index 0000000..65da330
--- /dev/null
+++ b/jstests/disk/repair5.js
@@ -0,0 +1,43 @@
+// SERVER-2351 Test killop with repair command.
+
+var baseName = "jstests_disk_repair5";
+
+port = allocatePorts( 1 )[ 0 ];
+dbpath = "/data/db/" + baseName + "/";
+repairpath = dbpath + "repairDir/"
+
+resetDbpath( dbpath );
+resetDbpath( repairpath );
+
+m = startMongoProgram( "mongod", "--port", port, "--dbpath", dbpath, "--repairpath", repairpath, "--nohttpinterface", "--bind_ip", "127.0.0.1" );
+db = m.getDB( baseName );
+
+big = new Array( 5000 ).toString();
+for( i = 0; i < 20000; ++i ) {
+ db[ baseName ].save( {i:i,b:big} );
+}
+
+function killRepair() {
+ while( 1 ) {
+ p = db.currentOp().inprog;
+ for( var i in p ) {
+ var o = p[ i ];
+ printjson( o );
+ // Find the active 'repairDatabase' op and kill it.
+ if ( o.active && o.query.repairDatabase ) {
+ db.killOp( o.opid );
+ return;
+ }
+ }
+ }
+}
+
+s = startParallelShell( killRepair.toString() + "; killRepair();" );
+
+// Repair should fail due to killOp.
+assert.commandFailed( db.runCommand( {repairDatabase:1, backupOriginalFiles:true} ) );
+
+s();
+
+assert.eq( 20000, db[ baseName ].find().itcount() );
+assert( db[ baseName ].validate().valid );
diff --git a/jstests/distinct1.js b/jstests/distinct1.js
index 5e47400..1b9354f 100644
--- a/jstests/distinct1.js
+++ b/jstests/distinct1.js
@@ -25,3 +25,4 @@ t.save( { a : { b : "c" } , c : 12 } );
res = t.distinct( "a.b" );
assert.eq( "a,b,c" , res.toString() , "B1" );
+assert.eq( "BasicCursor" , t._distinct( "a.b" ).stats.cursor , "B2" )
diff --git a/jstests/distinct_index1.js b/jstests/distinct_index1.js
index 8677457..64dc280 100644
--- a/jstests/distinct_index1.js
+++ b/jstests/distinct_index1.js
@@ -48,3 +48,13 @@ x = d( "b" , { a : { $gt : 5 } } );
assert.eq( 398 , x.stats.n , "BC1" )
assert.eq( 398 , x.stats.nscanned , "BC2" )
assert.eq( 398 , x.stats.nscannedObjects , "BC3" )
+
+// Check proper nscannedObjects count when using a query optimizer cursor.
+t.dropIndexes();
+t.ensureIndex( { a : 1, b : 1 } );
+x = d( "b" , { a : { $gt : 5 }, b : { $gt : 5 } } );
+assert.eq( "QueryOptimizerCursor", x.stats.cursor );
+assert.eq( 171 , x.stats.n )
+assert.eq( 275 , x.stats.nscanned )
+// Disable temporarily - exact value doesn't matter.
+// assert.eq( 266 , x.stats.nscannedObjects )
diff --git a/jstests/drop2.js b/jstests/drop2.js
index a1d619d..87e646e 100644
--- a/jstests/drop2.js
+++ b/jstests/drop2.js
@@ -26,7 +26,7 @@ function op( drop ) {
return null;
}
-s1 = startParallelShell( "db.jstests_drop2.count( { $where: function() { while( 1 ) { ; } } } )" );
+s1 = startParallelShell( "db.jstests_drop2.count( { $where: function() { while( 1 ) { sleep( 1 ); } } } )" );
countOp = null;
assert.soon( function() { countOp = op( false ); return countOp; } );
diff --git a/jstests/drop3.js b/jstests/drop3.js
new file mode 100644
index 0000000..b2ca94a
--- /dev/null
+++ b/jstests/drop3.js
@@ -0,0 +1,29 @@
+t = db.jstests_drop3;
+sub = t.sub;
+
+t.drop();
+sub.drop();
+
+
+for (var i = 0; i < 10; i++){
+ t.insert({});
+ sub.insert({});
+}
+
+var cursor = t.find().batchSize(2);
+var subcursor = sub.find().batchSize(2);
+
+cursor.next();
+subcursor.next();
+assert.eq( cursor.objsLeftInBatch(), 1 );
+assert.eq( subcursor.objsLeftInBatch(), 1 );
+
+t.drop(); // should invalidate cursor, but not subcursor
+db.getLastError();
+
+assert.throws( function(){ cursor.itcount() } ); // throws "cursor doesn't exist on server" error on getMore
+assert.eq( subcursor.itcount(), 9 ); //one already seen
+
+
+
+
diff --git a/jstests/dropdb.js b/jstests/dropdb.js
new file mode 100644
index 0000000..0b83884
--- /dev/null
+++ b/jstests/dropdb.js
@@ -0,0 +1,17 @@
+// Test that a db does not exist after it is dropped.
+// Disabled in the small oplog suite because the slave may create a master db
+// with the same name as the dropped db when requesting a clone.
+
+m = db.getMongo();
+baseName = "jstests_dropdb";
+ddb = db.getSisterDB( baseName );
+
+ddb.c.save( {} );
+ddb.getLastError();
+assert.neq( -1, m.getDBNames().indexOf( baseName ) );
+
+ddb.dropDatabase();
+assert.eq( -1, m.getDBNames().indexOf( baseName ) );
+
+ddb.dropDatabase();
+assert.eq( -1, m.getDBNames().indexOf( baseName ) );
diff --git a/jstests/dropdb_race.js b/jstests/dropdb_race.js
new file mode 100644
index 0000000..bff7980
--- /dev/null
+++ b/jstests/dropdb_race.js
@@ -0,0 +1,44 @@
+// test dropping a db with simultaneous commits
+
+m = db.getMongo();
+baseName = "jstests_dur_droprace";
+d = db.getSisterDB(baseName);
+t = d.foo;
+
+assert(d.adminCommand({ setParameter: 1, syncdelay: 5 }).ok);
+
+var s = 0;
+
+var start = new Date();
+
+for (var pass = 0; pass < 100; pass++) {
+ if (pass % 2 == 0) {
+ // sometimes wait for create db first, to vary the timing of things
+ t.insert({});
+ if( pass % 4 == 0 )
+ d.runCommand({getLastError:1,j:1});
+ else
+ d.getLastError();
+ }
+ t.insert({ x: 1 });
+ t.insert({ x: 3 });
+ t.ensureIndex({ x: 1 });
+ sleep(s);
+ if (pass % 37 == 0)
+ d.adminCommand("closeAllDatabases");
+ else if (pass % 13 == 0)
+ t.drop();
+ else if (pass % 17 == 0)
+ t.dropIndexes();
+ else
+ d.dropDatabase();
+ if (pass % 7 == 0)
+ d.runCommand({getLastError:1,j:1});
+ d.getLastError();
+ s = (s + 1) % 25;
+ //print(pass);
+ if ((new Date()) - start > 60000) {
+ print("stopping early");
+ break;
+ }
+}
diff --git a/jstests/dur/closeall.js b/jstests/dur/closeall.js
index f169f06..9131aed 100644
--- a/jstests/dur/closeall.js
+++ b/jstests/dur/closeall.js
@@ -8,7 +8,9 @@ function f() {
var ourdb = "closealltest";
print("closeall.js start mongod variant:" + variant);
- var options = (new Date()-0)%2==0 ? 8 : 0;
+ var R = (new Date()-0)%2;
+ var QuickCommits = (new Date()-0)%3 == 0;
+ var options = R==0 ? 8 : 0; // 8 is DurParanoid
print("closeall.js --durOptions " + options);
var N = 1000;
if (options)
@@ -23,6 +25,10 @@ function f() {
// we'll use two connections to make a little parallelism
var db1 = conn.getDB(ourdb);
var db2 = new Mongo(db1.getMongo().host).getDB(ourdb);
+ if( QuickCommits ) {
+ print("closeall.js QuickCommits variant (using a small syncdelay)");
+ assert( db2.adminCommand({setParameter:1, syncdelay:5}).ok );
+ }
print("closeall.js run test");
@@ -34,9 +40,9 @@ function f() {
db1.foo.update({ x: 99 }, { a: 1, b: 2, c: 3, d: 4 });
if (i % 100 == 0)
db1.foo.find();
- if( i == 800 )
+ if( i == 800 )
db1.foo.ensureIndex({ x: 1 });
- var res = null;
+ var res = null;
try {
if( variant == 1 )
sleep(0);
@@ -44,37 +50,37 @@ function f() {
sleep(1);
else if( variant == 3 && i % 10 == 0 )
print(i);
- res = db2.adminCommand("closeAllDatabases");
- }
- catch (e) {
- sleep(5000); // sleeping a little makes console output order prettier
- print("\n\n\nFAIL closeall.js closeAllDatabases command invocation threw an exception. i:" + i);
- try {
- print("getlasterror:");
- printjson(db2.getLastErrorObj());
- print("trying one more closealldatabases:");
- res = db2.adminCommand("closeAllDatabases");
- printjson(res);
- }
- catch (e) {
- print("got another exception : " + e);
- }
- print("\n\n\n");
- // sleep a little to capture possible mongod output?
- sleep(2000);
- throw e;
- }
- assert( res.ok, "closeAllDatabases res.ok=false");
- }
-
- print("closeall.js end test loop. slave.foo.count:");
- print(slave.foo.count());
-
- print("closeall.js shutting down servers");
- stopMongod(30002);
- stopMongod(30001);
-}
-
-f();
+ res = db2.adminCommand("closeAllDatabases");
+ }
+ catch (e) {
+ sleep(5000); // sleeping a little makes console output order prettier
+ print("\n\n\nFAIL closeall.js closeAllDatabases command invocation threw an exception. i:" + i);
+ try {
+ print("getlasterror:");
+ printjson(db2.getLastErrorObj());
+ print("trying one more closealldatabases:");
+ res = db2.adminCommand("closeAllDatabases");
+ printjson(res);
+ }
+ catch (e) {
+ print("got another exception : " + e);
+ }
+ print("\n\n\n");
+ // sleep a little to capture possible mongod output?
+ sleep(2000);
+ throw e;
+ }
+ assert( res.ok, "closeAllDatabases res.ok=false");
+ }
+
+ print("closeall.js end test loop. slave.foo.count:");
+ print(slave.foo.count());
+
+ print("closeall.js shutting down servers");
+ stopMongod(30002);
+ stopMongod(30001);
+}
+
+f();
sleep(500);
print("SUCCESS closeall.js");
diff --git a/jstests/dur/data/empty.bson b/jstests/dur/data/empty.bson
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/jstests/dur/data/empty.bson
diff --git a/jstests/dur/diskfull.js b/jstests/dur/diskfull.js
index da45c20..c123ea1 100644
--- a/jstests/dur/diskfull.js
+++ b/jstests/dur/diskfull.js
@@ -14,23 +14,23 @@ for ( i in files ) {
if ( !doIt ) {
print( "path " + startPath + " missing, skipping diskfull test" );
doIt = false;
-}
-
-function checkNoJournalFiles(path, pass) {
- var files = listFiles(path);
- if (files.some(function (f) { return f.name.indexOf("prealloc") < 0; })) {
- if (pass == null) {
- // wait a bit longer for mongod to potentially finish if it is still running.
- sleep(10000);
- return checkNoJournalFiles(path, 1);
- }
- print("\n\n\n");
- print("FAIL path:" + path);
- print("unexpected files:");
- printjson(files);
- assert(false, "FAIL a journal/lsn file is present which is unexpected");
- }
-}
+}
+
+function checkNoJournalFiles(path, pass) {
+ var files = listFiles(path);
+ if (files.some(function (f) { return f.name.indexOf("prealloc") < 0; })) {
+ if (pass == null) {
+ // wait a bit longer for mongod to potentially finish if it is still running.
+ sleep(10000);
+ return checkNoJournalFiles(path, 1);
+ }
+ print("\n\n\n");
+ print("FAIL path:" + path);
+ print("unexpected files:");
+ printjson(files);
+ assert(false, "FAIL a journal/lsn file is present which is unexpected");
+ }
+}
/** Clear dbpath without removing and recreating diskfulltest directory, as resetDbpath does */
function clear() {
@@ -56,7 +56,9 @@ function work() {
d.foo.insert( { _id:i, b:big } );
}
- d.getLastError();
+ gle = d.getLastError();
+ if ( gle )
+ throw gle;
} catch ( e ) {
print( e );
raise( e );
@@ -86,9 +88,8 @@ function runFirstMongodAndFillDisk() {
conn = startMongodNoReset("--port", 30001, "--dbpath", startPath, "--dur", "--smallfiles", "--durOptions", 8, "--noprealloc");
assert.throws( work, null, "no exception thrown when exceeding disk capacity" );
- waitMongoProgramOnPort( 30001 );
-
- // the above wait doesn't work on windows
+ stopMongod( 30001 );
+
sleep(5000);
}
@@ -104,9 +105,9 @@ function runSecondMongdAndRecover() {
// stopMongod seems to be asynchronous (hmmm) so we sleep here.
sleep(5000);
- // at this point, after clean shutdown, there should be no journal files
- log("check no journal files");
- checkNoJournalFiles(startPath + "/journal/");
+ // at this point, after clean shutdown, there should be no journal files
+ log("check no journal files");
+ checkNoJournalFiles(startPath + "/journal/");
log();
}
@@ -133,4 +134,4 @@ if ( doIt ) {
print(testname + " SUCCESS");
-} \ No newline at end of file
+}
diff --git a/jstests/dur/dropdb.js b/jstests/dur/dropdb.js
index 7f82cd7..2aabd4a 100644
--- a/jstests/dur/dropdb.js
+++ b/jstests/dur/dropdb.js
@@ -73,21 +73,28 @@ function verify() {
var d = conn.getDB("test");
var count = d.foo.count();
if (count != 1) {
- print("going to fail, count mismatch in verify()");
+ print("going to fail, test.foo.count() != 1 in verify()");
sleep(10000); // easier to read the output this way
print("\n\n\ndropdb.js FAIL test.foo.count() should be 1 but is : " + count);
- print(d.foo.count() + "\n\n\n");
+ print(d.foo.count() + "\n\n\n");
assert(false);
}
assert(d.foo.findOne()._id == 100, "100");
print("dropdb.js teste.foo.findOne:");
- printjson(conn.getDB("teste").foo.findOne());
-
- var teste = conn.getDB("teste");
- print("dropdb count " + teste.foo.count());
+ printjson(conn.getDB("teste").foo.findOne());
+
+ var teste = conn.getDB("teste");
+ var testecount = teste.foo.count();
+ if (testecount != 1) {
+ print("going to fail, teste.foo.count() != 1 in verify()");
+ sleep(10000); // easier to read the output this way
+ print("\n\n\ndropdb.js FAIL teste.foo.count() should be 1 but is : " + testecount);
+ print("\n\n\n");
+ assert(false);
+ }
+ print("teste.foo.count() = " + teste.foo.count());
assert(teste.foo.findOne()._id == 99, "teste");
-
}
if (debugging) {
diff --git a/jstests/dur/dur1.js b/jstests/dur/dur1.js
index 4c8f1bf..299ac30 100755
--- a/jstests/dur/dur1.js
+++ b/jstests/dur/dur1.js
@@ -75,7 +75,7 @@ function work() {
}
function verify() {
- log("verify");
+ log("verify test.foo.count == 2");
var d = conn.getDB("test");
var ct = d.foo.count();
if (ct != 2) {
@@ -99,37 +99,38 @@ var path1 = "/data/db/" + testname+"nodur";
var path2 = "/data/db/" + testname+"dur";
// non-durable version
-log();
+log("run mongod without journaling");
conn = startMongodEmpty("--port", 30000, "--dbpath", path1, "--nodur", "--smallfiles");
work();
stopMongod(30000);
// durable version
-log();
-conn = startMongodEmpty("--port", 30001, "--dbpath", path2, "--dur", "--smallfiles", "--durOptions", 8);
+log("run mongod with --journal");
+conn = startMongodEmpty("--port", 30001, "--dbpath", path2, "--journal", "--smallfiles", "--journalOptions", 8);
work();
// wait for group commit.
-printjson(conn.getDB('admin').runCommand({getlasterror:1, fsync:1}));
-
-// kill the process hard
+printjson(conn.getDB('admin').runCommand({getlasterror:1, fsync:1}));
+
+// kill the process hard
+log("kill 9");
stopMongod(30001, /*signal*/9);
// journal file should be present, and non-empty as we killed hard
// restart and recover
-log();
-conn = startMongodNoReset("--port", 30002, "--dbpath", path2, "--dur", "--smallfiles", "--durOptions", 8);
+log("restart mongod --journal and recover");
+conn = startMongodNoReset("--port", 30002, "--dbpath", path2, "--journal", "--smallfiles", "--journalOptions", 8);
verify();
-log("stop");
+log("stop mongod");
stopMongod(30002);
// stopMongod seems to be asynchronous (hmmm) so we sleep here.
-sleep(5000);
+// sleep(5000);
// at this point, after clean shutdown, there should be no journal files
-log("check no journal files");
+log("check no journal files (after presumably clean shutdown)");
checkNoJournalFiles(path2 + "/journal");
log("check data matches ns");
diff --git a/jstests/dur/dur1_tool.js b/jstests/dur/dur1_tool.js
new file mode 100755
index 0000000..5090b5b
--- /dev/null
+++ b/jstests/dur/dur1_tool.js
@@ -0,0 +1,152 @@
+/*
+ test durability option with tools (same a dur1.js but use mongorestore to do repair)
+*/
+
+var debugging = false;
+var testname = "dur1_tool";
+var step = 1;
+var conn = null;
+
+function checkNoJournalFiles(path, pass) {
+ var files = listFiles(path);
+ if (files.some(function (f) { return f.name.indexOf("prealloc") < 0; })) {
+ if (pass == null) {
+ // wait a bit longer for mongod to potentially finish if it is still running.
+ sleep(10000);
+ return checkNoJournalFiles(path, 1);
+ }
+ print("\n\n\n");
+ print("FAIL path:" + path);
+ print("unexpected files:");
+ printjson(files);
+ assert(false, "FAIL a journal/lsn file is present which is unexpected");
+ }
+}
+
+function runDiff(a, b) {
+ function reSlash(s) {
+ var x = s;
+ if (_isWindows()) {
+ while (1) {
+ var y = x.replace('/', '\\');
+ if (y == x)
+ break;
+ x = y;
+ }
+ }
+ return x;
+ }
+ a = reSlash(a);
+ b = reSlash(b);
+ print("diff " + a + " " + b);
+ return run("diff", a, b);
+}
+
+function log(str) {
+ print();
+ if(str)
+ print(testname+" step " + step++ + " " + str);
+ else
+ print(testname+" step " + step++);
+}
+
+// if you do inserts here, you will want to set _id. otherwise they won't match on different
+// runs so we can't do a binary diff of the resulting files to check they are consistent.
+function work() {
+ log("work");
+ var d = conn.getDB("test");
+ d.foo.insert({ _id: 3, x: 22 });
+ d.foo.insert({ _id: 4, x: 22 });
+ d.a.insert({ _id: 3, x: 22, y: [1, 2, 3] });
+ d.a.insert({ _id: 4, x: 22, y: [1, 2, 3] });
+ d.a.update({ _id: 4 }, { $inc: { x: 1} });
+
+ // try building an index. however, be careful as object id's in system.indexes would vary, so we do it manually:
+ d.system.indexes.insert({ _id: 99, ns: "test.a", key: { x: 1 }, name: "x_1", v: 0 });
+
+// d.a.update({ _id: 4 }, { $inc: { x: 1} });
+// d.a.reIndex();
+
+ // assure writes applied in case we kill -9 on return from this function
+ d.getLastError();
+
+ log("endwork");
+ return d;
+}
+
+function verify() {
+ log("verify test.foo.count == 2");
+ var d = conn.getDB("test");
+ var ct = d.foo.count();
+ if (ct != 2) {
+ print("\n\n\nFAIL dur1.js count is wrong in verify(): " + ct + "\n\n\n");
+ assert(ct == 2);
+ }
+}
+
+if( debugging ) {
+ // mongod already running in debugger
+ conn = db.getMongo();
+ work();
+ sleep(30000);
+ quit();
+}
+
+log();
+
+// directories
+var path1 = "/data/db/" + testname+"nodur";
+var path2 = "/data/db/" + testname+"dur";
+
+// non-durable version
+log("run mongod without journaling");
+conn = startMongodEmpty("--port", 30000, "--dbpath", path1, "--nodur", "--smallfiles");
+work();
+stopMongod(30000);
+
+// durable version
+log("run mongod with --journal");
+conn = startMongodEmpty("--port", 30001, "--dbpath", path2, "--journal", "--smallfiles", "--journalOptions", 8);
+work();
+
+// wait for group commit.
+printjson(conn.getDB('admin').runCommand({getlasterror:1, fsync:1}));
+
+// kill the process hard
+log("kill 9");
+stopMongod(30001, /*signal*/9);
+
+// journal file should be present, and non-empty as we killed hard
+
+// mongorestore with --dbpath and --journal options should do a recovery pass
+// empty.bson is an empty file so it won't actually insert anything
+log("use mongorestore to recover");
+runMongoProgram("mongorestore", "--dbpath", path2, "--journal", "-d", "test", "-c", "empty", "jstests/dur/data/empty.bson");
+
+// stopMongod seems to be asynchronous (hmmm) so we sleep here.
+// sleep(5000);
+
+// at this point, after clean shutdown, there should be no journal files
+log("check no journal files (after presumably clean shutdown)");
+checkNoJournalFiles(path2 + "/journal");
+
+log("check data matches ns");
+var diff = runDiff(path1 + "/test.ns", path2 + "/test.ns");
+if (diff != "") {
+ print("\n\n\nDIFFERS\n");
+ print(diff);
+}
+assert(diff == "", "error test.ns files differ");
+
+log("check data matches .0");
+var diff = runDiff(path1 + "/test.0", path2 + "/test.0");
+if (diff != "") {
+ print("\n\n\nDIFFERS\n");
+ print(diff);
+}
+assert(diff == "", "error test.0 files differ");
+
+log("check data matches done");
+
+print(testname + " SUCCESS");
+
diff --git a/jstests/dur/indexbg.js b/jstests/dur/indexbg.js
new file mode 100644
index 0000000..e78ae4a
--- /dev/null
+++ b/jstests/dur/indexbg.js
@@ -0,0 +1,7 @@
+path = '/data/db/indexbg_dur';
+
+m = startMongodEmpty( '--port', 30001, '--dbpath', path, '--journal', '--smallfiles', '--journalOptions', 24 );
+t = m.getDB( 'test' ).test;
+t.save( {x:1} );
+t.createIndex( {x:1}, {background:true} );
+t.count();
diff --git a/jstests/dur/indexbg2.js b/jstests/dur/indexbg2.js
new file mode 100644
index 0000000..6a0af24
--- /dev/null
+++ b/jstests/dur/indexbg2.js
@@ -0,0 +1,19 @@
+path = '/data/db/indexbg2_dur';
+
+m = startMongodEmpty( '--port', 30001, '--dbpath', path, '--journal', '--smallfiles' );
+
+t = m.getDB( 'test' ).test;
+t.createIndex( {a:1} );
+t.createIndex( {b:1} );
+t.createIndex( {x:1}, {background:true} );
+for( var i = 0; i < 1000; ++i ) {
+ t.insert( {_id:i,a:'abcd',b:'bcde',x:'four score and seven years ago'} );
+ t.remove( {_id:i} );
+}
+sleep( 1000 );
+for( var i = 1000; i < 2000; ++i ) {
+ t.insert( {_id:i,a:'abcd',b:'bcde',x:'four score and seven years ago'} );
+ t.remove( {_id:i} );
+}
+t.insert( {_id:2000,a:'abcd',b:'bcde',x:'four score and seven years ago'} );
+assert( !t.getDB().getLastError() );
diff --git a/jstests/dur/manyRestart.js b/jstests/dur/manyRestart.js
index 04e4318..79f72a4 100755
--- a/jstests/dur/manyRestart.js
+++ b/jstests/dur/manyRestart.js
@@ -116,6 +116,10 @@ conn = startMongodEmpty("--port", 30000, "--dbpath", path1, "--nodur", "--smallf
work();
stopMongod(30000);
+// hail mary for windows
+// Sat Jun 11 14:07:57 Error: boost::filesystem::create_directory: Access is denied: "\data\db\manyRestartsdur" (anon):1
+sleep(1000);
+
log("starting 30001");
conn = startMongodEmpty("--port", 30001, "--dbpath", path2, "--dur", "--smallfiles", "--durOptions", 8);
work();
diff --git a/jstests/eval_nolock.js b/jstests/eval_nolock.js
index 2688ec5..2ab96a3 100644
--- a/jstests/eval_nolock.js
+++ b/jstests/eval_nolock.js
@@ -10,7 +10,7 @@ res = db.runCommand( { eval :
db.eval_nolock.insert( { _id : 123 } );
return db.eval_nolock.count();
}
- , nlock : true } );
+ , nolock : true } );
assert.eq( 11 , res.retval , "A" )
diff --git a/jstests/evalb.js b/jstests/evalb.js
index 177930c..ea80331 100644
--- a/jstests/evalb.js
+++ b/jstests/evalb.js
@@ -11,7 +11,7 @@ db.setProfilingLevel( 2 );
assert.eq( 3, db.eval( function(){ return db.evalb.findOne().x; } ) , "B" );
o = db.system.profile.find().sort( { $natural : -1 } ).limit(1).next();
-assert( o.info.indexOf( "findOne().x" ) > 0 , "C : " + tojson( o ) )
+assert( tojson(o).indexOf( "findOne().x" ) > 0 , "C : " + tojson( o ) )
db.setProfilingLevel( 0 );
diff --git a/jstests/evalc.js b/jstests/evalc.js
index 8a9e889..0320ecd 100644
--- a/jstests/evalc.js
+++ b/jstests/evalc.js
@@ -1,17 +1,24 @@
t = db.jstests_evalc;
t.drop();
+t2 = db.evalc_done
+t2.drop()
+
for( i = 0; i < 10; ++i ) {
t.save( {i:i} );
}
// SERVER-1610
-s = startParallelShell( "print( 'starting forked:' + Date() ); for ( i=0; i<500000; i++ ){ db.currentOp(); } print( 'ending forked:' + Date() ); " )
+assert.eq( 0 , t2.count() , "X1" )
+
+s = startParallelShell( "print( 'starting forked:' + Date() ); for ( i=0; i<50000; i++ ){ db.currentOp(); } print( 'ending forked:' + Date() ); db.evalc_done.insert( { x : 1 } ); " )
print( "starting eval: " + Date() )
-for ( i=0; i<20000; i++ ){
+while ( true ) {
db.eval( "db.jstests_evalc.count( {i:10} );" );
+ if ( t2.count() > 0 )
+ break;
}
print( "end eval: " + Date() )
diff --git a/jstests/evald.js b/jstests/evald.js
index 78cabb6..7b18f3c 100644
--- a/jstests/evald.js
+++ b/jstests/evald.js
@@ -53,10 +53,10 @@ function doIt( ev, wait, where ) {
}
-doIt( "db.jstests_evald.count( { $where: function() { while( 1 ) { ; } } } )", true, true );
-doIt( "db.jstests_evald.count( { $where: function() { while( 1 ) { ; } } } )", false, true );
-doIt( "while( true ) {;}", false );
-doIt( "while( true ) {;}", true );
+doIt( "db.jstests_evald.count( { $where: function() { while( 1 ) { sleep(1); } } } )", true, true );
+doIt( "db.jstests_evald.count( { $where: function() { while( 1 ) { sleep(1); } } } )", false, true );
+doIt( "while( true ) { sleep(1);}", false );
+doIt( "while( true ) { sleep(1);}", true );
// the for loops are currently required, as a spawned op masks the parent op - see SERVER-1931
doIt( "while( 1 ) { for( var i = 0; i < 10000; ++i ) {;} db.jstests_evald.count( {i:10} ); }", true );
@@ -65,4 +65,4 @@ doIt( "while( 1 ) { for( var i = 0; i < 10000; ++i ) {;} db.jstests_evald.count(
doIt( "while( 1 ) { for( var i = 0; i < 10000; ++i ) {;} db.jstests_evald.count(); }", false );
doIt( "while( 1 ) { for( var i = 0; i < 10000; ++i ) {;} try { db.jstests_evald.count( {i:10} ); } catch ( e ) { } }", true );
-doIt( "while( 1 ) { try { while( 1 ) { ; } } catch ( e ) { } }", true );
+doIt( "while( 1 ) { try { while( 1 ) { sleep(1); } } catch ( e ) { } }", true );
diff --git a/jstests/exists3.js b/jstests/exists3.js
new file mode 100644
index 0000000..53a69d6
--- /dev/null
+++ b/jstests/exists3.js
@@ -0,0 +1,21 @@
+// Check exists with non empty document, based on SERVER-2470 example.
+
+t = db.jstests_exists3;
+t.drop();
+
+t.insert({a: 1, b: 2});
+
+assert.eq( 1, t.find({}).sort({c: -1}).itcount() );
+assert.eq( 1, t.count({c: {$exists: false}}) );
+assert.eq( 1, t.find({c: {$exists: false}}).itcount() );
+assert.eq( 1, t.find({c: {$exists: false}}).sort({c: -1}).itcount() );
+
+// now we have an index on the sort key
+t.ensureIndex({c: -1})
+
+assert.eq( 1, t.find({c: {$exists: false}}).sort({c: -1}).itcount() );
+assert.eq( 1, t.find({c: {$exists: false}}).itcount() );
+// still ok without the $exists
+assert.eq( 1, t.find({}).sort({c: -1}).itcount() );
+// and ok with a convoluted $not $exists
+assert.eq( 1, t.find({c: {$not: {$exists: true}}}).sort({c: -1}).itcount() );
diff --git a/jstests/exists4.js b/jstests/exists4.js
new file mode 100644
index 0000000..fb801ed
--- /dev/null
+++ b/jstests/exists4.js
@@ -0,0 +1,20 @@
+// Check various exists cases, based on SERVER-1735 example.
+
+t = db.jstests_exists4;
+t.drop();
+
+t.ensureIndex({date: -1, country_code: 1, user_id: 1}, {unique: 1, background: 1});
+t.insert({ date: new Date("08/27/2010"), tot_visit: 100});
+t.insert({ date: new Date("08/27/2010"), country_code: "IT", tot_visit: 77});
+t.insert({ date: new Date("08/27/2010"), country_code: "ES", tot_visit: 23});
+t.insert({ date: new Date("08/27/2010"), country_code: "ES", user_id: "and...@spacca.org", tot_visit: 11});
+t.insert({ date: new Date("08/27/2010"), country_code: "ES", user_id: "andrea.spa...@gmail.com", tot_visit: 5});
+t.insert({ date: new Date("08/27/2010"), country_code: "ES", user_id: "andrea.spa...@progloedizioni.com", tot_visit: 7});
+
+assert.eq( 6, t.find({date: new Date("08/27/2010")}).count() );
+assert.eq( 5, t.find({date: new Date("08/27/2010"), country_code: {$exists: true}}).count() );
+assert.eq( 1, t.find({date: new Date("08/27/2010"), country_code: {$exists: false}}).count() );
+assert.eq( 1, t.find({date: new Date("08/27/2010"), country_code: null}).count() );
+assert.eq( 3, t.find({date: new Date("08/27/2010"), country_code: {$exists: true}, user_id: {$exists: true}}).count() );
+assert.eq( 2, t.find({date: new Date("08/27/2010"), country_code: {$exists: true}, user_id: {$exists: false}}).count() );
+assert.eq( 2, t.find({date: new Date("08/27/2010"), country_code: {$exists: true}, user_id: null}).count() );
diff --git a/jstests/exists5.js b/jstests/exists5.js
new file mode 100644
index 0000000..a90a94f
--- /dev/null
+++ b/jstests/exists5.js
@@ -0,0 +1,33 @@
+// Test some $not/$exists cases.
+
+t = db.jstests_exists5;
+t.drop();
+
+t.save( {a:1} );
+assert.eq( 1, t.count( {'a.b':{$exists:false}} ) );
+assert.eq( 1, t.count( {'a.b':{$not:{$exists:true}}} ) );
+assert.eq( 1, t.count( {'c.d':{$not:{$exists:true}}} ) );
+assert.eq( 0, t.count( {'a.b':{$exists:true}} ) );
+assert.eq( 0, t.count( {'a.b':{$not:{$exists:false}}} ) );
+assert.eq( 0, t.count( {'c.d':{$not:{$exists:false}}} ) );
+
+t.drop();
+t.save( {a:{b:1}} );
+assert.eq( 1, t.count( {'a.b':{$exists:true}} ) );
+assert.eq( 1, t.count( {'a.b':{$not:{$exists:false}}} ) );
+assert.eq( 0, t.count( {'a.b':{$exists:false}} ) );
+assert.eq( 0, t.count( {'a.b':{$not:{$exists:true}}} ) );
+
+t.drop();
+t.save( {a:[1]} );
+assert.eq( 1, t.count( {'a.b':{$exists:false}} ) );
+assert.eq( 1, t.count( {'a.b':{$not:{$exists:true}}} ) );
+assert.eq( 0, t.count( {'a.b':{$exists:true}} ) );
+assert.eq( 0, t.count( {'a.b':{$not:{$exists:false}}} ) );
+
+t.drop();
+t.save( {a:[{b:1}]} );
+assert.eq( 1, t.count( {'a.b':{$exists:true}} ) );
+assert.eq( 1, t.count( {'a.b':{$not:{$exists:false}}} ) );
+assert.eq( 0, t.count( {'a.b':{$exists:false}} ) );
+assert.eq( 0, t.count( {'a.b':{$not:{$exists:true}}} ) );
diff --git a/jstests/exists6.js b/jstests/exists6.js
new file mode 100644
index 0000000..7c1cdc1
--- /dev/null
+++ b/jstests/exists6.js
@@ -0,0 +1,63 @@
+// SERVER-393 Test indexed matching with $exists.
+
+t = db.jstests_exists6;
+t.drop();
+
+t.ensureIndex( {b:1} );
+t.save( {} );
+t.save( {b:1} );
+t.save( {b:null} );
+
+checkExists = function( query ) {
+ // Constraint on 'b' is trivial, so a BasicCursor is the default cursor type.
+ assert.eq( 'BasicCursor', t.find( query ).explain().cursor );
+ // Index bounds include all elements.
+ assert.eq( [ [ { $minElement:1 }, { $maxElement:1 } ] ], t.find( query ).hint( {b:1} ).explain().indexBounds.b );
+ // All keys must be scanned.
+ assert.eq( 3, t.find( query ).hint( {b:1} ).explain().nscanned );
+ // 2 docs will match.
+ assert.eq( 2, t.find( query ).hint( {b:1} ).itcount() );
+}
+checkExists( {b:{$exists:true}} );
+checkExists( {b:{$not:{$exists:false}}} );
+
+checkMissing = function( query ) {
+ // Constraint on 'b' is nontrivial, so a BtreeCursor is the default cursor type.
+ assert.eq( 'BtreeCursor b_1', t.find( query ).explain().cursor );
+ // Scan null index keys.
+ assert.eq( [ [ null, null ] ], t.find( query ).explain().indexBounds.b );
+ // Two existing null keys will be scanned.
+ assert.eq( 2, t.find( query ).explain().nscanned );
+ // One doc is missing 'b'.
+ assert.eq( 1, t.find( query ).hint( {b:1} ).itcount() );
+}
+checkMissing( {b:{$exists:false}} );
+checkMissing( {b:{$not:{$exists:true}}} );
+
+// Now check existence of second compound field.
+t.ensureIndex( {a:1,b:1} );
+t.save( {a:1} );
+t.save( {a:1,b:1} );
+t.save( {a:1,b:null} );
+
+checkExists = function( query ) {
+ // Index bounds include all elements.
+ assert.eq( [ [ { $minElement:1 }, { $maxElement:1 } ] ], t.find( query ).explain().indexBounds.b );
+ // All keys must be scanned.
+ assert.eq( 3, t.find( query ).explain().nscanned );
+ // 2 docs will match.
+ assert.eq( 2, t.find( query ).hint( {a:1,b:1} ).itcount() );
+}
+checkExists( {a:1,b:{$exists:true}} );
+checkExists( {a:1,b:{$not:{$exists:false}}} );
+
+checkMissing = function( query ) {
+ // Scan null index keys.
+ assert.eq( [ [ null, null ] ], t.find( query ).explain().indexBounds.b );
+ // Two existing null keys will be scanned.
+ assert.eq( 2, t.find( query ).explain().nscanned );
+ // One doc is missing 'b'.
+ assert.eq( 1, t.find( query ).hint( {a:1,b:1} ).itcount() );
+}
+checkMissing( {a:1,b:{$exists:false}} );
+checkMissing( {a:1,b:{$not:{$exists:true}}} );
diff --git a/jstests/exists7.js b/jstests/exists7.js
new file mode 100644
index 0000000..14a9720
--- /dev/null
+++ b/jstests/exists7.js
@@ -0,0 +1,21 @@
+
+// Test that non boolean value types are allowed with $explain spec. SERVER-2322
+
+t = db.jstests_explain7;
+t.drop();
+
+function testIntegerExistsSpec() {
+ t.remove();
+ t.save( {} );
+ t.save( {a:1} );
+ t.save( {a:2} );
+ t.save( {a:3, b:3} );
+ t.save( {a:4, b:4} );
+
+ assert.eq( 2, t.count( {b:{$exists:1}} ) );
+ assert.eq( 3, t.count( {b:{$exists:0}} ) );
+}
+
+testIntegerExistsSpec();
+t.ensureIndex( {b:1} );
+testIntegerExistsSpec();
diff --git a/jstests/exists8.js b/jstests/exists8.js
new file mode 100644
index 0000000..82f0c45
--- /dev/null
+++ b/jstests/exists8.js
@@ -0,0 +1,76 @@
+// Test $exists with array element field names SERVER-2897
+
+t = db.jstests_exists8;
+t.drop();
+
+t.save( {a:[1]} );
+assert.eq( 1, t.count( {'a.0':{$exists:true}} ) );
+assert.eq( 1, t.count( {'a.1':{$exists:false}} ) );
+assert.eq( 0, t.count( {'a.0':{$exists:false}} ) );
+assert.eq( 0, t.count( {'a.1':{$exists:true}} ) );
+
+t.remove();
+t.save( {a:[1,2]} );
+assert.eq( 1, t.count( {'a.0':{$exists:true}} ) );
+assert.eq( 0, t.count( {'a.1':{$exists:false}} ) );
+assert.eq( 0, t.count( {'a.0':{$exists:false}} ) );
+assert.eq( 1, t.count( {'a.1':{$exists:true}} ) );
+
+t.remove();
+t.save( {a:[{}]} );
+assert.eq( 1, t.count( {'a.0':{$exists:true}} ) );
+assert.eq( 1, t.count( {'a.1':{$exists:false}} ) );
+assert.eq( 0, t.count( {'a.0':{$exists:false}} ) );
+assert.eq( 0, t.count( {'a.1':{$exists:true}} ) );
+
+t.remove();
+t.save( {a:[{},{}]} );
+assert.eq( 1, t.count( {'a.0':{$exists:true}} ) );
+assert.eq( 0, t.count( {'a.1':{$exists:false}} ) );
+assert.eq( 0, t.count( {'a.0':{$exists:false}} ) );
+assert.eq( 1, t.count( {'a.1':{$exists:true}} ) );
+
+t.remove();
+t.save( {a:[{'b':2},{'a':1}]} );
+assert.eq( 1, t.count( {'a.a':{$exists:true}} ) );
+assert.eq( 1, t.count( {'a.1.a':{$exists:true}} ) );
+assert.eq( 1, t.count( {'a.0.a':{$exists:false}} ) );
+
+t.remove();
+t.save( {a:[[1]]} );
+assert.eq( 1, t.count( {'a.0':{$exists:true}} ) );
+assert.eq( 1, t.count( {'a.0.0':{$exists:true}} ) );
+assert.eq( 0, t.count( {'a.0.0':{$exists:false}} ) );
+assert.eq( 0, t.count( {'a.0.0.0':{$exists:true}} ) );
+assert.eq( 1, t.count( {'a.0.0.0':{$exists:false}} ) );
+
+t.remove();
+t.save( {a:[[[1]]]} );
+assert.eq( 1, t.count( {'a.0.0.0':{$exists:true}} ) );
+
+t.remove();
+t.save( {a:[[{b:1}]]} );
+assert.eq( 0, t.count( {'a.b':{$exists:true}} ) );
+assert.eq( 1, t.count( {'a.b':{$exists:false}} ) );
+assert.eq( 1, t.count( {'a.0.b':{$exists:true}} ) );
+assert.eq( 0, t.count( {'a.0.b':{$exists:false}} ) );
+
+t.remove();
+t.save( {a:[[],[{b:1}]]} );
+assert.eq( 0, t.count( {'a.0.b':{$exists:true}} ) );
+assert.eq( 1, t.count( {'a.0.b':{$exists:false}} ) );
+
+t.remove();
+t.save( {a:[[],[{b:1}]]} );
+assert.eq( 1, t.count( {'a.1.b':{$exists:true}} ) );
+assert.eq( 0, t.count( {'a.1.b':{$exists:false}} ) );
+
+t.remove();
+t.save( {a:[[],[{b:1}]]} );
+assert.eq( 1, t.count( {'a.1.0.b':{$exists:true}} ) );
+assert.eq( 0, t.count( {'a.1.0.b':{$exists:false}} ) );
+
+t.remove();
+t.save( {a:[[],[{b:1}]]} );
+assert.eq( 0, t.count( {'a.1.1.b':{$exists:true}} ) );
+assert.eq( 1, t.count( {'a.1.1.b':{$exists:false}} ) );
diff --git a/jstests/exists9.js b/jstests/exists9.js
new file mode 100644
index 0000000..66378d1
--- /dev/null
+++ b/jstests/exists9.js
@@ -0,0 +1,41 @@
+// SERVER-393 Test exists with various empty array and empty object cases.
+
+t = db.jstests_exists9;
+t.drop();
+
+// Check existence of missing nested field.
+t.save( {a:{}} );
+assert.eq( 1, t.count( {'a.b':{$exists:false}} ) );
+assert.eq( 0, t.count( {'a.b':{$exists:true}} ) );
+
+// With index.
+t.ensureIndex( {'a.b':1} );
+assert.eq( 1, t.find( {'a.b':{$exists:false}} ).hint( {'a.b':1} ).itcount() );
+assert.eq( 0, t.find( {'a.b':{$exists:true}} ).hint( {'a.b':1} ).itcount() );
+
+t.drop();
+
+// Check that an empty array 'exists'.
+t.save( {} );
+t.save( {a:[]} );
+assert.eq( 1, t.count( {a:{$exists:true}} ) );
+assert.eq( 1, t.count( {a:{$exists:false}} ) );
+
+// With index.
+t.ensureIndex( {a:1} );
+assert.eq( 1, t.find( {a:{$exists:true}} ).hint( {a:1} ).itcount() );
+assert.eq( 1, t.find( {a:{$exists:false}} ).hint( {a:1} ).itcount() );
+assert.eq( 1, t.find( {a:{$exists:false}} ).hint( {a:1} ).explain().nscanned );
+
+t.drop();
+
+// Check that an indexed field within an empty array does not exist.
+t.save( {a:{'0':1}} );
+t.save( {a:[]} );
+assert.eq( 1, t.count( {'a.0':{$exists:true}} ) );
+assert.eq( 1, t.count( {'a.0':{$exists:false}} ) );
+
+// With index.
+t.ensureIndex( {'a.0':1} );
+assert.eq( 1, t.find( {'a.0':{$exists:true}} ).hint( {'a.0':1} ).itcount() );
+assert.eq( 1, t.find( {'a.0':{$exists:false}} ).hint( {'a.0':1} ).itcount() );
diff --git a/jstests/find8.js b/jstests/find8.js
new file mode 100644
index 0000000..2ec368b
--- /dev/null
+++ b/jstests/find8.js
@@ -0,0 +1,27 @@
+// SERVER-1932 Test unindexed matching of a range that is only valid in a multikey context.
+
+t = db.jstests_find8;
+t.drop();
+
+t.save( {a:[1,10]} );
+assert.eq( 1, t.count( { a: { $gt:2,$lt:5} } ) );
+
+// Check that we can do a query with 'invalid' range.
+assert.eq( 1, t.count( { a: { $gt:5,$lt:2} } ) );
+
+t.save( {a:[-1,12]} );
+
+// Check that we can do a query with 'invalid' range and sort.
+assert.eq( 1, t.find( { a: { $gt:5,$lt:2} } ).sort( {a:1} ).toArray()[ 0 ].a[ 0 ] );
+assert.eq( 2, t.find( { a: { $gt:5,$lt:2} } ).sort( {$natural:-1} ).itcount() );
+
+// SERVER-2864
+if( 0 ) {
+t.find( { a: { $gt:5,$lt:2} } ).itcount();
+// Check that we can record a plan for an 'invalid' range.
+assert( t.find( { a: { $gt:5,$lt:2} } ).explain( true ).oldPlan );
+}
+
+t.ensureIndex( {b:1} );
+// Check that if we do a table scan of an 'invalid' range in an or clause we don't check subsequent clauses.
+assert.eq( "BasicCursor", t.find( { $or:[{ a: { $gt:5,$lt:2} }, {b:1}] } ).explain().cursor ); \ No newline at end of file
diff --git a/jstests/find_and_modify2.js b/jstests/find_and_modify2.js
index 108fc0f..2c8ab5b 100644
--- a/jstests/find_and_modify2.js
+++ b/jstests/find_and_modify2.js
@@ -8,3 +8,9 @@ assert.eq(out, {_id:1, i:1});
out = t.findAndModify({update: {$inc: {i:1}}, fields: {i:0}});
assert.eq(out, {_id:1, j:0});
+
+out = t.findAndModify({update: {$inc: {i:1}}, fields: {_id:0, j:1}});
+assert.eq(out, {j:0});
+
+out = t.findAndModify({update: {$inc: {i:1}}, fields: {_id:0, j:1}, 'new': true});
+assert.eq(out, {j:0});
diff --git a/jstests/fsync.js b/jstests/fsync.js
index fccd623..134d558 100644
--- a/jstests/fsync.js
+++ b/jstests/fsync.js
@@ -1,22 +1,21 @@
// test the lock/unlock snapshotting feature a bit
-x=db.runCommand({fsync:1,lock:1});
+x=db.runCommand({fsync:1,lock:1}); // not on admin db
assert(!x.ok,"D");
-d=db.getSisterDB("admin");
-
-x=d.runCommand({fsync:1,lock:1});
+x=db.fsyncLock(); // uses admin automatically
assert(x.ok,"C");
-y = d.currentOp();
+y = db.currentOp();
assert(y.fsyncLock,"B");
-z = d.$cmd.sys.unlock.findOne();
+z = db.fsyncUnlock();
+assert( db.currentOp().fsyncLock == null, "A2" );
-// it will take some time to unlock, and unlock does not block and wait for that
-// doing a write will make us wait until db is writeable.
+// make sure the db is unlocked
db.jstests_fsync.insert({x:1});
+db.getLastError();
-assert( d.currentOp().fsyncLock == null, "A" );
+assert( db.currentOp().fsyncLock == null, "A" );
diff --git a/jstests/geo10.js b/jstests/geo10.js
new file mode 100644
index 0000000..39da09f
--- /dev/null
+++ b/jstests/geo10.js
@@ -0,0 +1,21 @@
+// Test for SERVER-2746
+
+coll = db.geo10
+coll.drop();
+
+db.geo10.ensureIndex( { c : '2d', t : 1 }, { min : 0, max : Math.pow( 2, 40 ) } )
+assert( db.getLastError() == null, "B" )
+assert( db.system.indexes.count({ ns : "test.geo10" }) == 2, "A3" )
+
+printjson( db.system.indexes.find().toArray() )
+
+db.geo10.insert( { c : [ 1, 1 ], t : 1 } )
+assert.eq( db.getLastError(), null, "C" )
+
+db.geo10.insert( { c : [ 3600, 3600 ], t : 1 } )
+assert( db.getLastError() == null, "D" )
+
+db.geo10.insert( { c : [ 0.001, 0.001 ], t : 1 } )
+assert( db.getLastError() == null, "E" )
+
+printjson( db.geo10.find({ c : { $within : { $box : [[0.001, 0.001], [Math.pow(2, 40) - 0.001, Math.pow(2, 40) - 0.001]] } }, t : 1 }).toArray() )
diff --git a/jstests/geo4.js b/jstests/geo4.js
index 73b4020..78404ab 100644
--- a/jstests/geo4.js
+++ b/jstests/geo4.js
@@ -4,7 +4,7 @@ t.drop();
t.insert( { zip : "06525" , loc : [ 41.352964 , 73.01212 ] } );
t.ensureIndex( { loc : "2d" }, { bits : 33 } );
-assert.eq( db.getLastError() , "can't have more than 32 bits in geo index" , "a" );
+assert.eq( db.getLastError() , "bits in geo index must be between 1 and 32" , "a" );
t.ensureIndex( { loc : "2d" }, { bits : 32 } );
assert( !db.getLastError(), "b" );
diff --git a/jstests/geo_array0.js b/jstests/geo_array0.js
new file mode 100644
index 0000000..2d69611
--- /dev/null
+++ b/jstests/geo_array0.js
@@ -0,0 +1,25 @@
+// Make sure the very basics of geo arrays are sane by creating a few multi location docs
+
+t = db.geoarray
+t.drop();
+
+t.insert( { zip : "10001", loc : { home : [ 10, 10 ], work : [ 50, 50 ] } } )
+t.insert( { zip : "10002", loc : { home : [ 20, 20 ], work : [ 50, 50 ] } } )
+t.insert( { zip : "10003", loc : { home : [ 30, 30 ], work : [ 50, 50 ] } } )
+assert.isnull( db.getLastError() )
+
+t.ensureIndex( { loc : "2d", zip : 1 } );
+assert.isnull( db.getLastError() )
+assert.eq( 2, t.getIndexKeys().length )
+
+t.insert( { zip : "10004", loc : { home : [ 40, 40 ], work : [ 50, 50 ] } } )
+
+assert.isnull( db.getLastError() )
+
+// test normal access
+
+printjson( t.find( { loc : { $within : { $box : [ [ 0, 0 ], [ 45, 45 ] ] } } } ).toArray() )
+
+assert.eq( 4, t.find( { loc : { $within : { $box : [ [ 0, 0 ], [ 45, 45 ] ] } } } ).count() );
+
+assert.eq( 4, t.find( { loc : { $within : { $box : [ [ 45, 45 ], [ 50, 50 ] ] } } } ).count() );
diff --git a/jstests/geo_array1.js b/jstests/geo_array1.js
new file mode 100644
index 0000000..56b7c85
--- /dev/null
+++ b/jstests/geo_array1.js
@@ -0,0 +1,30 @@
+// Make sure many locations in one doc works, in the form of an array
+
+t = db.geoarray1
+t.drop();
+
+var locObj = []
+
+// Add locations everywhere
+for ( var i = 0; i < 10; i++ ) {
+ for ( var j = 0; j < 10; j++ ) {
+ if ( j % 2 == 0 )
+ locObj.push( [ i, j ] )
+ else
+ locObj.push( { x : i, y : j } )
+ }
+}
+
+// Add docs with all these locations
+for( var i = 0; i < 300; i++ ){
+ t.insert( { loc : locObj } )
+}
+t.ensureIndex( { loc : "2d" } )
+
+// Pull them back
+for ( var i = 0; i < 10; i++ ) {
+ for ( var j = 0; j < 10; j++ ) {
+ assert.eq( 300, t.find( { loc : { $within : { $box : [ [ i - 0.5, j - 0.5 ], [ i + 0.5, j + 0.5 ] ] } } } )
+ .count() )
+ }
+}
diff --git a/jstests/geo_array2.js b/jstests/geo_array2.js
new file mode 100644
index 0000000..28cb152
--- /dev/null
+++ b/jstests/geo_array2.js
@@ -0,0 +1,163 @@
+// Check the semantics of near calls with multiple locations
+
+t = db.geoarray2
+t.drop();
+
+var numObjs = 10;
+var numLocs = 100;
+
+// Test the semantics of near / nearSphere / etc. queries with multiple keys per object
+
+for( var i = -1; i < 2; i++ ){
+ for(var j = -1; j < 2; j++ ){
+
+ locObj = []
+
+ if( i != 0 || j != 0 )
+ locObj.push( { x : i * 50 + Random.rand(),
+ y : j * 50 + Random.rand() } )
+ locObj.push( { x : Random.rand(),
+ y : Random.rand() } )
+ locObj.push( { x : Random.rand(),
+ y : Random.rand() } )
+
+ t.insert({ name : "" + i + "" + j , loc : locObj , type : "A" })
+ t.insert({ name : "" + i + "" + j , loc : locObj , type : "B" })
+ }
+}
+
+t.ensureIndex({ loc : "2d" , type : 1 })
+
+assert.isnull( db.getLastError() )
+
+print( "Starting testing phase... ")
+
+for( var t = 0; t < 2; t++ ){
+
+var type = t == 0 ? "A" : "B"
+
+for( var i = -1; i < 2; i++ ){
+ for(var j = -1; j < 2; j++ ){
+
+ var center = [ i * 50 , j * 50 ]
+ var count = i == 0 && j == 0 ? 2 * 9 : 1
+ var objCount = i == 0 && j == 0 ? 2 : 1
+
+ // Do near check
+
+ var nearResults = db.runCommand( { geoNear : "geoarray2" ,
+ near : center ,
+ num : count,
+ query : { type : type } } ).results
+ //printjson( nearResults )
+
+ var objsFound = {}
+ var lastResult = 0;
+ for( var k = 0; k < nearResults.length; k++ ){
+
+ // All distances should be small, for the # of results
+ assert.gt( 1.5 , nearResults[k].dis )
+ // Distances should be increasing
+ assert.lte( lastResult, nearResults[k].dis )
+ // Objs should be of the right type
+ assert.eq( type, nearResults[k].obj.type )
+
+ lastResult = nearResults[k].dis
+
+ var objKey = "" + nearResults[k].obj._id
+
+ if( objKey in objsFound ) objsFound[ objKey ]++
+ else objsFound[ objKey ] = 1
+
+ }
+
+ // Make sure we found the right objects each time
+ // Note: Multiple objects could be found for diff distances.
+ for( var q in objsFound ){
+ assert.eq( objCount , objsFound[q] )
+ }
+
+
+ // Do nearSphere check
+
+ // Earth Radius
+ var eRad = 6371
+
+ nearResults = db.geoarray2.find( { loc : { $nearSphere : center , $maxDistance : 500 /* km */ / eRad }, type : type } ).toArray()
+
+ assert.eq( nearResults.length , count )
+
+ objsFound = {}
+ lastResult = 0;
+ for( var k = 0; k < nearResults.length; k++ ){
+ var objKey = "" + nearResults[k]._id
+ if( objKey in objsFound ) objsFound[ objKey ]++
+ else objsFound[ objKey ] = 1
+
+ }
+
+ // Make sure we found the right objects each time
+ for( var q in objsFound ){
+ assert.eq( objCount , objsFound[q] )
+ }
+
+
+
+ // Within results do not return duplicate documents
+
+ var count = i == 0 && j == 0 ? 9 : 1
+ var objCount = i == 0 && j == 0 ? 1 : 1
+
+ // Do within check
+ objsFound = {}
+
+ var box = [ [center[0] - 1, center[1] - 1] , [center[0] + 1, center[1] + 1] ]
+
+ //printjson( box )
+
+ var withinResults = db.geoarray2.find({ loc : { $within : { $box : box } } , type : type }).toArray()
+
+ assert.eq( withinResults.length , count )
+
+ for( var k = 0; k < withinResults.length; k++ ){
+ var objKey = "" + withinResults[k]._id
+ if( objKey in objsFound ) objsFound[ objKey ]++
+ else objsFound[ objKey ] = 1
+ }
+
+ //printjson( objsFound )
+
+ // Make sure we found the right objects each time
+ for( var q in objsFound ){
+ assert.eq( objCount , objsFound[q] )
+ }
+
+
+ // Do within check (circle)
+ objsFound = {}
+
+ withinResults = db.geoarray2.find({ loc : { $within : { $center : [ center, 1.5 ] } } , type : type }).toArray()
+
+ assert.eq( withinResults.length , count )
+
+ for( var k = 0; k < withinResults.length; k++ ){
+ var objKey = "" + withinResults[k]._id
+ if( objKey in objsFound ) objsFound[ objKey ]++
+ else objsFound[ objKey ] = 1
+ }
+
+ // Make sure we found the right objects each time
+ for( var q in objsFound ){
+ assert.eq( objCount , objsFound[q] )
+ }
+
+
+
+ }
+}
+
+}
+
+
+
+
diff --git a/jstests/geo_borders.js b/jstests/geo_borders.js
index 85ffe35..9e8788a 100644
--- a/jstests/geo_borders.js
+++ b/jstests/geo_borders.js
@@ -1,10 +1,7 @@
-
t = db.borders
t.drop()
-// FIXME: FAILS for all epsilon < 1
-epsilon = 1
-//epsilon = 0.99
+epsilon = 0.0001;
// For these tests, *required* that step ends exactly on max
min = -1
@@ -12,9 +9,9 @@ max = 1
step = 1
numItems = 0;
-for(var x = min; x <= max; x += step){
- for(var y = min; y <= max; y += step){
- t.insert({ loc: { x : x, y : y } })
+for ( var x = min; x <= max; x += step ) {
+ for ( var y = min; y <= max; y += step ) {
+ t.insert( { loc : { x : x, y : y } } )
numItems++;
}
}
@@ -23,167 +20,149 @@ overallMin = -1
overallMax = 1
// Create a point index slightly smaller than the points we have
-t.ensureIndex({ loc : "2d" }, { max : overallMax - epsilon / 2, min : overallMin + epsilon / 2})
-assert(db.getLastError(), "A1")
+t.ensureIndex( { loc : "2d" }, { max : overallMax - epsilon / 2, min : overallMin + epsilon / 2 } )
+assert( db.getLastError() )
-// FIXME: FAILS for all epsilon < 1
// Create a point index only slightly bigger than the points we have
-t.ensureIndex({ loc : "2d" }, { max : overallMax + epsilon, min : overallMin - epsilon })
-assert.isnull(db.getLastError(), "A2")
-
-
-
-
-
-
-
+t.ensureIndex( { loc : "2d" }, { max : overallMax + epsilon, min : overallMin - epsilon } )
+assert.isnull( db.getLastError() )
-//************
+// ************
// Box Tests
-//************
-
-
-/*
-// FIXME: Fails w/ non-nice error
-// Make sure we can get all points in full bounds
-assert(numItems == t.find({ loc : { $within : { $box : [[overallMin - epsilon,
- overallMin - epsilon],
- [overallMax + epsilon,
- overallMax + epsilon]] } } }).count(), "B1");
-*/
-
-// Make sure an error is thrown if the bounds are bigger than the box itself
-// TODO: Do we really want an error in this case? Shouldn't we just clip the box?
-try{
- t.findOne({ loc : { $within : { $box : [[overallMin - 2 * epsilon,
- overallMin - 2 * epsilon],
- [overallMax + 2 * epsilon,
- overallMax + 2 * epsilon]] } } });
- assert(false, "B2");
-}
-catch(e){}
-
-//Make sure we can get at least close to the bounds of the index
-assert(numItems == t.find({ loc : { $within : { $box : [[overallMin - epsilon / 2,
- overallMin - epsilon / 2],
- [overallMax + epsilon / 2,
- overallMax + epsilon / 2]] } } }).count(), "B3");
-
-
-//**************
-//Circle tests
-//**************
-
-center = (overallMax + overallMin) / 2
-center = [center, center]
+// ************
+
+// If the bounds are bigger than the box itself, just clip at the borders
+assert.eq( numItems, t.find(
+ { loc : { $within : { $box : [
+ [ overallMin - 2 * epsilon, overallMin - 2 * epsilon ],
+ [ overallMax + 2 * epsilon, overallMax + 2 * epsilon ] ] } } } ).count() );
+
+// Check this works also for bounds where only a single dimension is off-bounds
+assert.eq( numItems - 5, t.find(
+ { loc : { $within : { $box : [
+ [ overallMin - 2 * epsilon, overallMin - 0.5 * epsilon ],
+ [ overallMax - epsilon, overallMax - epsilon ] ] } } } ).count() );
+
+// Make sure we can get at least close to the bounds of the index
+assert.eq( numItems, t.find(
+ { loc : { $within : { $box : [
+ [ overallMin - epsilon / 2, overallMin - epsilon / 2 ],
+ [ overallMax + epsilon / 2, overallMax + epsilon / 2 ] ] } } } ).count() );
+
+// Make sure we can get at least close to the bounds of the index
+assert.eq( numItems, t.find(
+ { loc : { $within : { $box : [
+ [ overallMax + epsilon / 2, overallMax + epsilon / 2 ],
+ [ overallMin - epsilon / 2, overallMin - epsilon / 2 ] ] } } } ).count() );
+
+// Check that swapping min/max has good behavior
+assert.eq( numItems, t.find(
+ { loc : { $within : { $box : [
+ [ overallMax + epsilon / 2, overallMax + epsilon / 2 ],
+ [ overallMin - epsilon / 2, overallMin - epsilon / 2 ] ] } } } ).count() );
+
+assert.eq( numItems, t.find(
+ { loc : { $within : { $box : [
+ [ overallMax + epsilon / 2, overallMin - epsilon / 2 ],
+ [ overallMin - epsilon / 2, overallMax + epsilon / 2 ] ] } } } ).count() );
+
+// **************
+// Circle tests
+// **************
+
+center = ( overallMax + overallMin ) / 2
+center = [ center, center ]
radius = overallMax
-offCenter = [center[0] + radius, center[1] + radius]
-onBounds = [offCenter[0] + epsilon, offCenter[1] + epsilon]
-offBounds = [onBounds[0] + epsilon, onBounds[1] + epsilon]
-
+offCenter = [ center[0] + radius, center[1] + radius ]
+onBounds = [ offCenter[0] + epsilon, offCenter[1] + epsilon ]
+offBounds = [ onBounds[0] + epsilon, onBounds[1] + epsilon ]
+onBoundsNeg = [ -onBounds[0], -onBounds[1] ]
-//Make sure we can get all points when radius is exactly at full bounds
-assert(0 < t.find({ loc : { $within : { $center : [center, radius + epsilon] } } }).count(), "C1");
+// Make sure we can get all points when radius is exactly at full bounds
+assert.lt( 0, t.find( { loc : { $within : { $center : [ center, radius + epsilon ] } } } ).count() );
-//Make sure we can get points when radius is over full bounds
-assert(0 < t.find({ loc : { $within : { $center : [center, radius + 2 * epsilon] } } }).count(), "C2");
+// Make sure we can get points when radius is over full bounds
+assert.lt( 0, t.find( { loc : { $within : { $center : [ center, radius + 2 * epsilon ] } } } ).count() );
-//Make sure we can get points when radius is over full bounds, off-centered
-assert(0 < t.find({ loc : { $within : { $center : [offCenter, radius + 2 * epsilon] } } }).count(), "C3");
+// Make sure we can get points when radius is over full bounds, off-centered
+assert.lt( 0, t.find( { loc : { $within : { $center : [ offCenter, radius + 2 * epsilon ] } } } ).count() );
-//Make sure we get correct corner point when center is in bounds
+// Make sure we get correct corner point when center is in bounds
// (x bounds wrap, so could get other corner)
-cornerPt = t.findOne({ loc : { $within : { $center : [offCenter, step / 2] } } });
-assert(cornerPt.loc.y == overallMax, "C4")
+cornerPt = t.findOne( { loc : { $within : { $center : [ offCenter, step / 2 ] } } } );
+assert.eq( cornerPt.loc.y, overallMax )
-/*
-// FIXME: FAILS, returns opposite corner
// Make sure we get correct corner point when center is on bounds
-cornerPt = t.findOne({ loc : { $within : { $center : [onBounds,
- Math.sqrt(2 * epsilon * epsilon) + (step / 2) ] } } });
-assert(cornerPt.loc.y == overallMax, "C5")
-*/
+// NOTE: Only valid points on MIN bounds
+cornerPt = t
+ .findOne( { loc : { $within : { $center : [ onBoundsNeg, Math.sqrt( 2 * epsilon * epsilon ) + ( step / 2 ) ] } } } );
+assert.eq( cornerPt.loc.y, overallMin )
-// TODO: Handle gracefully?
// Make sure we can't get corner point when center is over bounds
-try{
- t.findOne({ loc : { $within : { $center : [offBounds,
- Math.sqrt(8 * epsilon * epsilon) + (step / 2) ] } } });
- assert(false, "C6")
+try {
+ t.findOne( { loc : { $within : { $center : [ offBounds, Math.sqrt( 8 * epsilon * epsilon ) + ( step / 2 ) ] } } } );
+ assert( false )
+} catch (e) {
}
-catch(e){}
-
-
-
-
-
-
-//***********
-//Near tests
-//***********
-
-//Make sure we can get all nearby points to point in range
-assert(t.find({ loc : { $near : offCenter } }).next().loc.y == overallMax,
- "D1");
-
-/*
-// FIXME: FAILS, returns opposite list
-// Make sure we can get all nearby points to point on boundary
-assert(t.find({ loc : { $near : onBounds } }).next().loc.y == overallMax,
- "D2");
-*/
-
-//TODO: Could this work?
-//Make sure we can't get all nearby points to point over boundary
-try{
- t.findOne({ loc : { $near : offBounds } })
- assert(false, "D3")
+// Make sure we can't get corner point when center is on max bounds
+try {
+ t.findOne( { loc : { $within : { $center : [ onBounds, Math.sqrt( 8 * epsilon * epsilon ) + ( step / 2 ) ] } } } );
+ assert( false )
+} catch (e) {
}
-catch(e){}
-
-/*
-// FIXME: FAILS, returns only single point
-//Make sure we can get all nearby points within one step (4 points in top corner)
-assert(4 == t.find({ loc : { $near : offCenter, $maxDistance : step * 1.9 } }).count(),
- "D4");
-*/
+// ***********
+// Near tests
+// ***********
+// Make sure we can get all nearby points to point in range
+assert.eq( overallMax, t.find( { loc : { $near : offCenter } } ).next().loc.y );
-//**************
-//Command Tests
-//**************
+// Make sure we can get all nearby points to point on boundary
+assert.eq( overallMin, t.find( { loc : { $near : onBoundsNeg } } ).next().loc.y );
+// Make sure we can't get all nearby points to point over boundary
+try {
+ t.findOne( { loc : { $near : offBounds } } )
+ assert( false )
+} catch (e) {
+}
+// Make sure we can't get all nearby points to point on max boundary
+try {
+ t.findOne( { loc : { $near : onBoundsNeg } } )
+ assert( false )
+} catch (e) {
+}
-//Make sure we can get all nearby points to point in range
-assert(db.runCommand({ geoNear : "borders", near : offCenter }).results[0].obj.loc.y == overallMax,
- "E1");
+// Make sure we can get all nearby points within one step (4 points in top
+// corner)
+assert.eq( 4, t.find( { loc : { $near : offCenter, $maxDistance : step * 1.9 } } ).count() );
+// **************
+// Command Tests
+// **************
+// Make sure we can get all nearby points to point in range
+assert.eq( overallMax, db.runCommand( { geoNear : "borders", near : offCenter } ).results[0].obj.loc.y );
-/*
-// FIXME: FAILS, returns opposite list
-//Make sure we can get all nearby points to point on boundary
-assert(db.runCommand({ geoNear : "borders", near : onBounds }).results[0].obj.loc.y == overallMax,
- "E2");
-*/
+// Make sure we can get all nearby points to point on boundary
+assert.eq( overallMin, db.runCommand( { geoNear : "borders", near : onBoundsNeg } ).results[0].obj.loc.y );
-//TODO: Could this work?
-//Make sure we can't get all nearby points to point over boundary
-try{
- db.runCommand({ geoNear : "borders", near : offBounds }).results.length
- assert(false, "E3")
+// Make sure we can't get all nearby points to point over boundary
+try {
+ db.runCommand( { geoNear : "borders", near : offBounds } ).results.length
+ assert( false )
+} catch (e) {
}
-catch(e){}
-
-
-/*
-// FIXME: Fails, returns one point
-//Make sure we can get all nearby points within one step (4 points in top corner)
-assert(4 == db.runCommand({ geoNear : "borders", near : offCenter, maxDistance : step * 1.5 }).results.length,
- "E4");
-*/
-
+// Make sure we can't get all nearby points to point on max boundary
+try {
+ db.runCommand( { geoNear : "borders", near : onBounds } ).results.length
+ assert( false )
+} catch (e) {
+}
+// Make sure we can get all nearby points within one step (4 points in top
+// corner)
+assert.eq( 4, db.runCommand( { geoNear : "borders", near : offCenter, maxDistance : step * 1.5 } ).results.length );
diff --git a/jstests/geo_center_sphere2.js b/jstests/geo_center_sphere2.js
new file mode 100644
index 0000000..c9c5fbb
--- /dev/null
+++ b/jstests/geo_center_sphere2.js
@@ -0,0 +1,158 @@
+//
+// Tests the error handling of spherical queries
+// along with multi-location documents.
+// This is necessary since the error handling must manage
+// multiple documents, and so requires simultaneous testing.
+//
+
+var numTests = 30
+
+for ( var test = 0; test < numTests; test++ ) {
+
+ //var fixedTest = 6017
+ //if( fixedTest ) test = fixedTest
+
+ Random.srand( 1337 + test );
+
+ var radius = 5000 * Random.rand() // km
+ radius = radius / 6371 // radians
+ var numDocs = Math.floor( 400 * Random.rand() )
+ // TODO: Wrapping uses the error value to figure out what would overlap...
+ var bits = Math.floor( 5 + Random.rand() * 28 )
+ var maxPointsPerDoc = 50
+
+ t = db.sphere
+
+ var randomPoint = function() {
+ return [ Random.rand() * 360 - 180, Random.rand() * 180 - 90 ];
+ }
+
+ // Get a start point that doesn't require wrapping
+ // TODO: Are we a bit too aggressive with wrapping issues?
+ var startPoint
+ var ex = null
+ do {
+
+ t.drop()
+ startPoint = randomPoint()
+ t.ensureIndex( { loc : "2d" }, { bits : bits } )
+
+ try {
+ // Check for wrapping issues
+ t.find( { loc : { $within : { $centerSphere : [ startPoint, radius ] } } } ).toArray()
+ ex = null
+ } catch (e) {
+ ex = e
+ }
+ } while (ex)
+
+ var pointsIn = 0
+ var pointsOut = 0
+ var docsIn = 0
+ var docsOut = 0
+ var totalPoints = 0
+
+ //var point = randomPoint()
+
+ for ( var i = 0; i < numDocs; i++ ) {
+
+ var numPoints = Math.floor( Random.rand() * maxPointsPerDoc + 1 )
+ var docIn = false
+ var multiPoint = []
+
+ totalPoints += numPoints
+
+ for ( var p = 0; p < numPoints; p++ ) {
+ var point = randomPoint()
+ multiPoint.push( point )
+
+ if ( Geo.sphereDistance( startPoint, point ) <= radius ) {
+ pointsIn++
+ docIn = true
+ } else {
+ pointsOut++
+ }
+ }
+
+ t.insert( { loc : multiPoint } )
+
+ if ( docIn )
+ docsIn++
+ else
+ docsOut++
+
+ }
+
+ printjson( { test: test,
+ radius : radius, bits : bits, numDocs : numDocs, pointsIn : pointsIn, docsIn : docsIn, pointsOut : pointsOut,
+ docsOut : docsOut } )
+
+ assert.isnull( db.getLastError() )
+ assert.eq( docsIn + docsOut, numDocs )
+ assert.eq( pointsIn + pointsOut, totalPoints )
+
+ // $centerSphere
+ assert.eq( docsIn, t.find( { loc : { $within : { $centerSphere : [ startPoint, radius ] } } } ).count() )
+
+ // $nearSphere
+ var results = t.find( { loc : { $nearSphere : startPoint, $maxDistance : radius } } ).limit( 2 * pointsIn )
+ .toArray()
+
+ assert.eq( pointsIn, results.length )
+
+ var distance = 0;
+ for ( var i = 0; i < results.length; i++ ) {
+
+ var minNewDistance = radius + 1
+ for( var j = 0; j < results[i].loc.length; j++ ){
+ var newDistance = Geo.sphereDistance( startPoint, results[i].loc[j] )
+ if( newDistance < minNewDistance && newDistance >= distance ) minNewDistance = newDistance
+ }
+
+ //print( "Dist from : " + results[i].loc[j] + " to " + startPoint + " is "
+ // + minNewDistance + " vs " + radius )
+
+ assert.lte( minNewDistance, radius )
+ assert.gte( minNewDistance, distance )
+ distance = minNewDistance
+
+ }
+
+ // geoNear
+ var results = db.runCommand( {
+ geoNear : "sphere", near : startPoint, maxDistance : radius, num : 2 * pointsIn, spherical : true } ).results
+
+ /*
+ printjson( results );
+
+ for ( var j = 0; j < results[0].obj.loc.length; j++ ) {
+ var newDistance = Geo.sphereDistance( startPoint, results[0].obj.loc[j] )
+ if( newDistance <= radius ) print( results[0].obj.loc[j] + " : " + newDistance )
+ }
+ */
+
+ assert.eq( pointsIn, results.length )
+
+ var distance = 0;
+ for ( var i = 0; i < results.length; i++ ) {
+ var retDistance = results[i].dis
+
+ // print( "Dist from : " + results[i].loc + " to " + startPoint + " is "
+ // + retDistance + " vs " + radius )
+
+ var distInObj = false
+ for ( var j = 0; j < results[i].obj.loc.length && distInObj == false; j++ ) {
+ var newDistance = Geo.sphereDistance( startPoint, results[i].obj.loc[j] )
+ distInObj = ( newDistance >= retDistance - 0.0001 && newDistance <= retDistance + 0.0001 )
+ }
+
+ assert( distInObj )
+ assert.lte( retDistance, radius )
+ assert.gte( retDistance, distance )
+ distance = retDistance
+ }
+
+ //break;
+}
+
+
diff --git a/jstests/geo_distinct.js b/jstests/geo_distinct.js
new file mode 100644
index 0000000..60e0d15
--- /dev/null
+++ b/jstests/geo_distinct.js
@@ -0,0 +1,16 @@
+// Test distinct with geo queries SERVER-2135
+
+t = db.commits
+t.drop()
+
+t.save( { _id : ObjectId( "4ce63ec2f360622431000013" ), loc : [ 55.59664, 13.00156 ], author : "FredrikL" } )
+
+printjson( db.runCommand( { distinct : 'commits', key : 'loc' } ) )
+assert.isnull( db.getLastError() )
+
+t.ensureIndex( { loc : '2d' } )
+
+printjson( t.getIndexes() )
+
+printjson( db.runCommand( { distinct : 'commits', key : 'loc' } ) )
+assert.isnull( db.getLastError() ) \ No newline at end of file
diff --git a/jstests/geo_fiddly_box.js b/jstests/geo_fiddly_box.js
new file mode 100644
index 0000000..2a9cf49
--- /dev/null
+++ b/jstests/geo_fiddly_box.js
@@ -0,0 +1,44 @@
+// Reproduces simple test for SERVER-2832
+
+// The setup to reproduce was/is to create a set of points where the
+// "expand" portion of the geo-lookup expands the 2d range in only one
+// direction (so points are required on either side of the expanding range)
+
+db.geo_fiddly_box.drop();
+db.geo_fiddly_box.ensureIndex({ loc : "2d" })
+
+db.geo_fiddly_box.insert({ "loc" : [3, 1] })
+db.geo_fiddly_box.insert({ "loc" : [3, 0.5] })
+db.geo_fiddly_box.insert({ "loc" : [3, 0.25] })
+db.geo_fiddly_box.insert({ "loc" : [3, -0.01] })
+db.geo_fiddly_box.insert({ "loc" : [3, -0.25] })
+db.geo_fiddly_box.insert({ "loc" : [3, -0.5] })
+db.geo_fiddly_box.insert({ "loc" : [3, -1] })
+
+// OK!
+print( db.geo_fiddly_box.count() )
+assert.eq( 7, db.geo_fiddly_box.count({ "loc" : { "$within" : { "$box" : [ [2, -2], [46, 2] ] } } }), "Not all locations found!" );
+
+
+// Test normal lookup of a small square of points as a sanity check.
+
+epsilon = 0.0001;
+min = -1
+max = 1
+step = 1
+numItems = 0;
+
+db.geo_fiddly_box2.drop()
+db.geo_fiddly_box2.ensureIndex({ loc : "2d" }, { max : max + epsilon / 2, min : min - epsilon / 2 })
+
+for(var x = min; x <= max; x += step){
+ for(var y = min; y <= max; y += step){
+ db.geo_fiddly_box2.insert({ "loc" : { x : x, y : y } })
+ numItems++;
+ }
+}
+
+assert.eq( numItems, db.geo_fiddly_box2.count({ loc : { $within : { $box : [[min - epsilon / 3,
+ min - epsilon / 3],
+ [max + epsilon / 3,
+ max + epsilon / 3]] } } }), "Not all locations found!");
diff --git a/jstests/geo_fiddly_box2.js b/jstests/geo_fiddly_box2.js
new file mode 100644
index 0000000..0588abf
--- /dev/null
+++ b/jstests/geo_fiddly_box2.js
@@ -0,0 +1,32 @@
+// Reproduces simple test for SERVER-2115
+
+// The setup to reproduce is to create a set of points and a really big bounds so that we are required to do
+// exact lookups on the points to get correct results.
+
+t = db.geo_fiddly_box2
+t.drop()
+
+t.insert( { "letter" : "S", "position" : [ -3, 0 ] } )
+t.insert( { "letter" : "C", "position" : [ -2, 0 ] } )
+t.insert( { "letter" : "R", "position" : [ -1, 0 ] } )
+t.insert( { "letter" : "A", "position" : [ 0, 0 ] } )
+t.insert( { "letter" : "B", "position" : [ 1, 0 ] } )
+t.insert( { "letter" : "B", "position" : [ 2, 0 ] } )
+t.insert( { "letter" : "L", "position" : [ 3, 0 ] } )
+t.insert( { "letter" : "E", "position" : [ 4, 0 ] } )
+
+t.ensureIndex( { position : "2d" } )
+result = t.find( { "position" : { "$within" : { "$box" : [ [ -3, -1 ], [ 0, 1 ] ] } } } )
+assert.eq( 4, result.count() )
+
+t.dropIndex( { position : "2d" } )
+t.ensureIndex( { position : "2d" }, { min : -10000000, max : 10000000 } )
+
+result = t.find( { "position" : { "$within" : { "$box" : [ [ -3, -1 ], [ 0, 1 ] ] } } } )
+assert.eq( 4, result.count() )
+
+t.dropIndex( { position : "2d" } )
+t.ensureIndex( { position : "2d" }, { min : -1000000000, max : 1000000000 } )
+
+result = t.find( { "position" : { "$within" : { "$box" : [ [ -3, -1 ], [ 0, 1 ] ] } } } )
+assert.eq( 4, result.count() )
diff --git a/jstests/geo_group.js b/jstests/geo_group.js
new file mode 100644
index 0000000..4e038f9
--- /dev/null
+++ b/jstests/geo_group.js
@@ -0,0 +1,35 @@
+t = db.geo_group;
+t.drop();
+
+n = 1;
+for ( var x=-100; x<100; x+=2 ){
+ for ( var y=-100; y<100; y+=2 ){
+ t.insert( { _id : n++ , loc : [ x , y ] } )
+ }
+}
+
+t.ensureIndex( { loc : "2d" } );
+
+// Test basic count with $near
+assert.eq(t.find().count(), 10000);
+assert.eq(t.find( { loc : { $within : {$center : [[56,8], 10]}}}).count(), 81);
+assert.eq(t.find( { loc : { $near : [56, 8, 10] } } ).count(), 81);
+
+// Test basic group that effectively does a count
+assert.eq(
+ t.group( {
+ reduce : function (obj, prev) { prev.sums = { count : prev.sums.count + 1} },
+ initial : { sums:{count:0} } }
+ ),
+ [ { "sums" : { "count" : 10000 } } ]
+);
+
+// Test basic group + $near that does a count
+assert.eq(
+ t.group( {
+ reduce : function (obj, prev) { prev.sums = { count : prev.sums.count + 1} },
+ initial : { sums:{count:0} },
+ cond : { loc : { $near : [56, 8, 10] } } }
+ ),
+ [ { "sums" : { "count" : 81 } } ]
+);
diff --git a/jstests/geo_mapreduce.js b/jstests/geo_mapreduce.js
new file mode 100644
index 0000000..a6ecf76
--- /dev/null
+++ b/jstests/geo_mapreduce.js
@@ -0,0 +1,56 @@
+// Test script from SERVER-1742
+
+// MongoDB test script for mapreduce with geo query
+
+// setup test collection
+db.apples.drop()
+db.apples.insert( { "geo" : { "lat" : 32.68331909, "long" : 69.41610718 }, "apples" : 5 } );
+db.apples.insert( { "geo" : { "lat" : 35.01860809, "long" : 70.92027283 }, "apples" : 2 } );
+db.apples.insert( { "geo" : { "lat" : 31.11639023, "long" : 64.19970703 }, "apples" : 11 } );
+db.apples.insert( { "geo" : { "lat" : 32.64500046, "long" : 69.36251068 }, "apples" : 4 } );
+db.apples.insert( { "geo" : { "lat" : 33.23638916, "long" : 69.81360626 }, "apples" : 9 } );
+db.apples.ensureIndex( { "geo" : "2d" } );
+
+center = [ 32.68, 69.41 ];
+radius = 10 / 111; // 10km; 1 arcdegree ~= 111km
+geo_query = { geo : { '$within' : { '$center' : [ center, radius ] } } };
+
+// geo query on collection works fine
+res = db.apples.find( geo_query );
+assert.eq( 2, res.count() );
+
+// map function
+m = function() {
+ emit( null, { "apples" : this.apples } );
+};
+
+// reduce function
+r = function(key, values) {
+ var total = 0;
+ for ( var i = 0; i < values.length; i++ ) {
+ total += values[i].apples;
+ }
+ return { "apples" : total };
+};
+
+// mapreduce without geo query works fine
+res = db.apples.mapReduce( m, r, { out : { inline : 1 } } );
+
+printjson( res )
+total = res.results[0];
+assert.eq( 31, total.value.apples );
+
+// mapreduce with regular query works fine too
+res = db.apples.mapReduce( m, r, { out : { inline : 1 }, query : { apples : { '$lt' : 9 } } } );
+total = res.results[0];
+assert.eq( 11, total.value.apples );
+
+// mapreduce with geo query gives error on mongodb version 1.6.2
+// uncaught exception: map reduce failed: {
+// "assertion" : "manual matcher config not allowed",
+// "assertionCode" : 13285,
+// "errmsg" : "db assertion failure",
+// "ok" : 0 }
+res = db.apples.mapReduce( m, r, { out : { inline : 1 }, query : geo_query } );
+total = res.results[0];
+assert.eq( 9, total.value.apples );
diff --git a/jstests/geo_mapreduce2.js b/jstests/geo_mapreduce2.js
new file mode 100644
index 0000000..9c39345
--- /dev/null
+++ b/jstests/geo_mapreduce2.js
@@ -0,0 +1,36 @@
+// Geo mapreduce 2 from SERVER-3478
+
+var coll = db.geoMR2
+coll.drop()
+
+for( var i = 0; i < 300; i++ )
+ coll.insert({ i : i, location : [ 10, 20 ] })
+
+coll.ensureIndex({ location : "2d" })
+
+// map function
+m = function() {
+ emit( null, { count : this.i } )
+}
+
+// reduce function
+r = function( key, values ) {
+
+ var total = 0
+ for ( var i = 0; i < values.length; i++ ) {
+ total += values[i].count
+ }
+
+ return { count : total }
+};
+
+try{ coll.mapReduce( m, r,
+ { out : coll.getName() + "_mr",
+ sort : { _id : 1 },
+ query : { 'location' : { $within : { $centerSphere : [[ 10, 20 ], 0.01 ] } } } })
+
+}
+catch( e ){
+ // This should occur, since we can't in-mem sort for mreduce
+ printjson( e )
+}
diff --git a/jstests/geo_multinest0.js b/jstests/geo_multinest0.js
new file mode 100644
index 0000000..68e6095
--- /dev/null
+++ b/jstests/geo_multinest0.js
@@ -0,0 +1,63 @@
+// Make sure nesting of location arrays also works.
+
+t = db.geonest
+t.drop();
+
+t.insert( { zip : "10001", data : [ { loc : [ 10, 10 ], type : "home" },
+ { loc : [ 50, 50 ], type : "work" } ] } )
+t.insert( { zip : "10002", data : [ { loc : [ 20, 20 ], type : "home" },
+ { loc : [ 50, 50 ], type : "work" } ] } )
+t.insert( { zip : "10003", data : [ { loc : [ 30, 30 ], type : "home" },
+ { loc : [ 50, 50 ], type : "work" } ] } )
+assert.isnull( db.getLastError() )
+
+t.ensureIndex( { "data.loc" : "2d", zip : 1 } );
+assert.isnull( db.getLastError() )
+assert.eq( 2, t.getIndexKeys().length )
+
+t.insert( { zip : "10004", data : [ { loc : [ 40, 40 ], type : "home" },
+ { loc : [ 50, 50 ], type : "work" } ] } )
+assert.isnull( db.getLastError() )
+
+// test normal access
+
+printjson( t.find( { "data.loc" : { $within : { $box : [ [ 0, 0 ], [ 45, 45 ] ] } } } ).toArray() )
+
+assert.eq( 4, t.find( { "data.loc" : { $within : { $box : [ [ 0, 0 ], [ 45, 45 ] ] } } } ).count() );
+
+assert.eq( 4, t.find( { "data.loc" : { $within : { $box : [ [ 45, 45 ], [ 50, 50 ] ] } } } ).count() );
+
+
+
+
+
+// Try a complex nesting
+
+t = db.geonest
+t.drop();
+
+t.insert( { zip : "10001", data : [ { loc : [ [ 10, 10 ], { lat : 50, long : 50 } ], type : "home" } ] } )
+t.insert( { zip : "10002", data : [ { loc : [ 20, 20 ], type : "home" },
+ { loc : [ 50, 50 ], type : "work" } ] } )
+t.insert( { zip : "10003", data : [ { loc : [ { x : 30, y : 30 }, [ 50, 50 ] ], type : "home" } ] } )
+assert.isnull( db.getLastError() )
+
+t.ensureIndex( { "data.loc" : "2d", zip : 1 } );
+assert.isnull( db.getLastError() )
+assert.eq( 2, t.getIndexKeys().length )
+
+t.insert( { zip : "10004", data : [ { loc : [ 40, 40 ], type : "home" },
+ { loc : [ 50, 50 ], type : "work" } ] } )
+
+
+assert.isnull( db.getLastError() )
+
+// test normal access
+printjson( t.find( { "data.loc" : { $within : { $box : [ [ 0, 0 ], [ 45, 45 ] ] } } } ).toArray() )
+
+assert.eq( 4, t.find( { "data.loc" : { $within : { $box : [ [ 0, 0 ], [ 45, 45 ] ] } } } ).count() );
+
+assert.eq( 4, t.find( { "data.loc" : { $within : { $box : [ [ 45, 45 ], [ 50, 50 ] ] } } } ).count() );
+
+
+
diff --git a/jstests/geo_multinest1.js b/jstests/geo_multinest1.js
new file mode 100644
index 0000000..7754f24
--- /dev/null
+++ b/jstests/geo_multinest1.js
@@ -0,0 +1,37 @@
+// Test distance queries with interleaved distances
+
+t = db.multinest
+t.drop();
+
+t.insert( { zip : "10001", data : [ { loc : [ 10, 10 ], type : "home" },
+ { loc : [ 29, 29 ], type : "work" } ] } )
+t.insert( { zip : "10002", data : [ { loc : [ 20, 20 ], type : "home" },
+ { loc : [ 39, 39 ], type : "work" } ] } )
+t.insert( { zip : "10003", data : [ { loc : [ 30, 30 ], type : "home" },
+ { loc : [ 49, 49 ], type : "work" } ] } )
+assert.isnull( db.getLastError() )
+
+t.ensureIndex( { "data.loc" : "2d", zip : 1 } );
+assert.isnull( db.getLastError() )
+assert.eq( 2, t.getIndexKeys().length )
+
+t.insert( { zip : "10004", data : [ { loc : [ 40, 40 ], type : "home" },
+ { loc : [ 59, 59 ], type : "work" } ] } )
+assert.isnull( db.getLastError() )
+
+// test normal access
+
+var result = t.find({ "data.loc" : { $near : [0, 0] } }).toArray();
+
+printjson( result )
+
+assert.eq( 8, result.length )
+
+var order = [ 1, 2, 1, 3, 2, 4, 3, 4 ]
+
+for( var i = 0; i < result.length; i++ ){
+ assert.eq( "1000" + order[i], result[i].zip )
+}
+
+
+
diff --git a/jstests/geo_oob_sphere.js b/jstests/geo_oob_sphere.js
new file mode 100644
index 0000000..d493f36
--- /dev/null
+++ b/jstests/geo_oob_sphere.js
@@ -0,0 +1,42 @@
+//
+// Ensures spherical queries report invalid latitude values in points and center positions
+//
+
+t = db.geooobsphere
+t.drop();
+
+t.insert({ loc : { x : 30, y : 89 } })
+t.insert({ loc : { x : 30, y : 89 } })
+t.insert({ loc : { x : 30, y : 89 } })
+t.insert({ loc : { x : 30, y : 89 } })
+t.insert({ loc : { x : 30, y : 89 } })
+t.insert({ loc : { x : 30, y : 89 } })
+t.insert({ loc : { x : 30, y : 91 } })
+
+t.ensureIndex({ loc : "2d" })
+assert.isnull( db.getLastError() )
+
+t.find({ loc : { $nearSphere : [ 30, 91 ], $maxDistance : 0.25 } }).count()
+var err = db.getLastError()
+assert( err != null )
+printjson( err )
+
+t.find({ loc : { $nearSphere : [ 30, 89 ], $maxDistance : 0.25 } }).count()
+var err = db.getLastError()
+assert( err != null )
+printjson( err )
+
+t.find({ loc : { $within : { $centerSphere : [[ -180, -91 ], 0.25] } } }).count()
+var err = db.getLastError()
+assert( err != null )
+printjson( err )
+
+db.runCommand({ geoNear : "geooobsphere", near : [179, -91], maxDistance : 0.25, spherical : true })
+var err = db.getLastError()
+assert( err != null )
+printjson( err )
+
+db.runCommand({ geoNear : "geooobsphere", near : [30, 89], maxDistance : 0.25, spherical : true })
+var err = db.getLastError()
+assert( err != null )
+printjson( err ) \ No newline at end of file
diff --git a/jstests/geo_poly_edge.js b/jstests/geo_poly_edge.js
new file mode 100644
index 0000000..31a0849
--- /dev/null
+++ b/jstests/geo_poly_edge.js
@@ -0,0 +1,22 @@
+//
+// Tests polygon edge cases
+//
+
+var coll = db.getCollection( 'jstests_geo_poly_edge' )
+coll.drop();
+
+coll.ensureIndex({ loc : "2d" })
+
+coll.insert({ loc : [10, 10] })
+coll.insert({ loc : [10, -10] })
+
+assert.eq( coll.find({ loc : { $within : { $polygon : [[ 10, 10 ], [ 10, 10 ], [ 10, -10 ]] } } }).itcount(), 2 )
+
+assert.eq( coll.find({ loc : { $within : { $polygon : [[ 10, 10 ], [ 10, 10 ], [ 10, 10 ]] } } }).itcount(), 1 )
+
+
+coll.insert({ loc : [179, 0] })
+coll.insert({ loc : [0, 179] })
+
+assert.eq( coll.find({ loc : { $within : { $polygon : [[0, 0], [1000, 0], [1000, 1000], [0, 1000]] } } }).itcount(), 3 )
+
diff --git a/jstests/geo_poly_line.js b/jstests/geo_poly_line.js
new file mode 100644
index 0000000..aca77b6
--- /dev/null
+++ b/jstests/geo_poly_line.js
@@ -0,0 +1,17 @@
+// Test that weird polygons work SERVER-3725
+
+t = db.geo_polygon5;
+t.drop();
+
+t.insert({loc:[0,0]})
+t.insert({loc:[1,0]})
+t.insert({loc:[2,0]})
+t.insert({loc:[3,0]})
+t.insert({loc:[4,0]})
+
+t.ensureIndex( { loc : "2d" } );
+
+printjson( t.find({ loc: { "$within": { "$polygon" : [[0,0], [2,0], [4,0]] } } }).toArray() )
+
+assert.eq( 5, t.find({ loc: { "$within": { "$polygon" : [[0,0], [2,0], [4,0]] } } }).itcount() )
+
diff --git a/jstests/geo_polygon1.js b/jstests/geo_polygon1.js
new file mode 100644
index 0000000..4b7427a
--- /dev/null
+++ b/jstests/geo_polygon1.js
@@ -0,0 +1,74 @@
+//
+// Tests for N-dimensional polygon querying
+//
+
+t = db.geo_polygon1;
+t.drop();
+
+num = 0;
+for ( x=1; x < 9; x++ ){
+ for ( y= 1; y < 9; y++ ){
+ o = { _id : num++ , loc : [ x , y ] };
+ t.save( o );
+ }
+}
+
+t.ensureIndex( { loc : "2d" } );
+
+triangle = [[0,0], [1,1], [0,2]];
+
+// Look at only a small slice of the data within a triangle
+assert.eq( 1 , t.find( { loc: { "$within": { "$polygon" : triangle }}} ).count() , "Triangle Test" );
+
+boxBounds = [ [0,0], [0,10], [10,10], [10,0] ];
+
+assert.eq( num , t.find( { loc : { "$within" : { "$polygon" : boxBounds } } } ).count() , "Bounding Box Test" );
+
+//Make sure we can add object-based polygons
+assert.eq( num, t.find( { loc : { $within : { $polygon : { a : [-10, -10], b : [-10, 10], c : [10, 10], d : [10, -10] } } } } ).count() )
+
+// Look in a box much bigger than the one we have data in
+boxBounds = [[-100,-100], [-100, 100], [100,100], [100,-100]];
+assert.eq( num , t.find( { loc : { "$within" : { "$polygon" : boxBounds } } } ).count() , "Big Bounding Box Test" );
+
+t.drop();
+
+pacman = [
+ [0,2], [0,4], [2,6], [4,6], // Head
+ [6,4], [4,3], [6,2], // Mouth
+ [4,0], [2,0] // Bottom
+ ];
+
+t.save({loc: [1,3] }); // Add a point that's in
+t.ensureIndex( { loc : "2d" } );
+assert.isnull( db.getLastError() )
+
+assert.eq( 1 , t.find({loc : { $within : { $polygon : pacman }}} ).count() , "Pacman single point" );
+
+t.save({ loc : [5, 3] }) // Add a point that's out right in the mouth opening
+t.save({ loc : [3, 7] }) // Add a point above the center of the head
+t.save({ loc : [3,-1] }) // Add a point below the center of the bottom
+
+assert.eq( 1 , t.find({loc : { $within : { $polygon : pacman }}} ).count() , "Pacman double point" );
+
+// Make sure we can't add bad polygons
+okay = true
+try{
+ t.find( { loc : { $within : { $polygon : [1, 2] } } } ).toArray()
+ okay = false
+}
+catch(e){}
+assert(okay)
+try{
+ t.find( { loc : { $within : { $polygon : [[1, 2]] } } } ).toArray()
+ okay = false
+}
+catch(e){}
+assert(okay)
+try{
+ t.find( { loc : { $within : { $polygon : [[1, 2], [2, 3]] } } } ).toArray()
+ okay = false
+}
+catch(e){}
+assert(okay)
+
diff --git a/jstests/geo_polygon2.js b/jstests/geo_polygon2.js
new file mode 100644
index 0000000..617801b
--- /dev/null
+++ b/jstests/geo_polygon2.js
@@ -0,0 +1,266 @@
+//
+// More tests for N-dimensional polygon querying
+//
+
+// Create a polygon of some shape (no holes)
+// using turtle graphics. Basically, will look like a very contorted octopus (quad-pus?) shape.
+// There are no holes, but some edges will probably touch.
+
+var numTests = 10
+
+for ( var test = 0; test < numTests; test++ ) {
+
+ Random.srand( 1337 + test );
+
+ var numTurtles = 4;
+ var gridSize = [ 40, 40 ];
+ var turtleSteps = 500;
+ var bounds = [ Random.rand() * -1000000 + 0.00001, Random.rand() * 1000000 + 0.00001 ]
+ var rotation = Math.PI * Random.rand();
+ var bits = Math.floor( Random.rand() * 32 );
+
+ printjson( { test : test, rotation : rotation, bits : bits })
+
+ var rotatePoint = function( x, y ) {
+
+ if( y == undefined ){
+ y = x[1]
+ x = x[0]
+ }
+
+ xp = x * Math.cos( rotation ) - y * Math.sin( rotation )
+ yp = y * Math.cos( rotation ) + x * Math.sin( rotation )
+
+ var scaleX = (bounds[1] - bounds[0]) / 360
+ var scaleY = (bounds[1] - bounds[0]) / 360
+
+ x *= scaleX
+ y *= scaleY
+
+ return [xp, yp]
+
+ }
+
+
+ var grid = []
+ for ( var i = 0; i < gridSize[0]; i++ ) {
+ grid.push( new Array( gridSize[1] ) )
+ }
+
+ grid.toString = function() {
+
+ var gridStr = "";
+ for ( var j = grid[0].length - 1; j >= -1; j-- ) {
+ for ( var i = 0; i < grid.length; i++ ) {
+ if ( i == 0 )
+ gridStr += ( j == -1 ? " " : ( j % 10) ) + ": "
+ if ( j != -1 )
+ gridStr += "[" + ( grid[i][j] != undefined ? grid[i][j] : " " ) + "]"
+ else
+ gridStr += " " + ( i % 10 ) + " "
+ }
+ gridStr += "\n"
+ }
+
+ return gridStr;
+ }
+
+ var turtles = []
+ for ( var i = 0; i < numTurtles; i++ ) {
+
+ var up = ( i % 2 == 0 ) ? i - 1 : 0;
+ var left = ( i % 2 == 1 ) ? ( i - 1 ) - 1 : 0;
+
+ turtles[i] = [
+ [ Math.floor( gridSize[0] / 2 ), Math.floor( gridSize[1] / 2 ) ],
+ [ Math.floor( gridSize[0] / 2 ) + left, Math.floor( gridSize[1] / 2 ) + up ] ];
+
+ grid[turtles[i][1][0]][turtles[i][1][1]] = i
+
+ }
+
+ grid[Math.floor( gridSize[0] / 2 )][Math.floor( gridSize[1] / 2 )] = "S"
+
+ // print( grid.toString() )
+
+ var pickDirections = function() {
+
+ var up = Math.floor( Random.rand() * 3 )
+ if ( up == 2 )
+ up = -1
+
+ if ( up == 0 ) {
+ var left = Math.floor( Random.rand() * 3 )
+ if ( left == 2 )
+ left = -1
+ } else
+ left = 0
+
+ if ( Random.rand() < 0.5 ) {
+ var swap = left
+ left = up
+ up = swap
+ }
+
+ return [ left, up ]
+ }
+
+ for ( var s = 0; s < turtleSteps; s++ ) {
+
+ for ( var t = 0; t < numTurtles; t++ ) {
+
+ var dirs = pickDirections()
+ var up = dirs[0]
+ var left = dirs[1]
+
+ var lastTurtle = turtles[t][turtles[t].length - 1]
+ var nextTurtle = [ lastTurtle[0] + left, lastTurtle[1] + up ]
+
+ if ( nextTurtle[0] >= gridSize[0] || nextTurtle[1] >= gridSize[1] || nextTurtle[0] < 0 || nextTurtle[1] < 0 )
+ continue;
+
+ if ( grid[nextTurtle[0]][nextTurtle[1]] == undefined ) {
+ turtles[t].push( nextTurtle )
+ grid[nextTurtle[0]][nextTurtle[1]] = t;
+ }
+
+ }
+ }
+
+ // print( grid.toString() )
+
+ turtlePaths = []
+ for ( var t = 0; t < numTurtles; t++ ) {
+
+ turtlePath = []
+
+ var nextSeg = function(currTurtle, prevTurtle) {
+
+ var pathX = currTurtle[0]
+
+ if ( currTurtle[1] < prevTurtle[1] ) {
+ pathX = currTurtle[0] + 1
+ pathY = prevTurtle[1]
+ } else if ( currTurtle[1] > prevTurtle[1] ) {
+ pathX = currTurtle[0]
+ pathY = currTurtle[1]
+ } else if ( currTurtle[0] < prevTurtle[0] ) {
+ pathX = prevTurtle[0]
+ pathY = currTurtle[1]
+ } else if ( currTurtle[0] > prevTurtle[0] ) {
+ pathX = currTurtle[0]
+ pathY = currTurtle[1] + 1
+ }
+
+ // print( " Prev : " + prevTurtle + " Curr : " + currTurtle + " path
+ // : "
+ // + [pathX, pathY]);
+
+ return [ pathX, pathY ]
+ }
+
+ for ( var s = 1; s < turtles[t].length; s++ ) {
+
+ currTurtle = turtles[t][s]
+ prevTurtle = turtles[t][s - 1]
+
+ turtlePath.push( nextSeg( currTurtle, prevTurtle ) )
+
+ }
+
+ for ( var s = turtles[t].length - 2; s >= 0; s-- ) {
+
+ currTurtle = turtles[t][s]
+ prevTurtle = turtles[t][s + 1]
+
+ turtlePath.push( nextSeg( currTurtle, prevTurtle ) )
+
+ }
+
+ // printjson( turtlePath )
+
+ // End of the line is not inside our polygon.
+ var lastTurtle = turtles[t][turtles[t].length - 1]
+ grid[lastTurtle[0]][lastTurtle[1]] = undefined
+
+ fixedTurtlePath = []
+ for ( var s = 1; s < turtlePath.length; s++ ) {
+
+ if ( turtlePath[s - 1][0] == turtlePath[s][0] && turtlePath[s - 1][1] == turtlePath[s][1] )
+ continue;
+
+ var up = turtlePath[s][1] - turtlePath[s - 1][1]
+ var right = turtlePath[s][0] - turtlePath[s - 1][0]
+ var addPoint = ( up != 0 && right != 0 )
+
+ if ( addPoint && up != right ) {
+ fixedTurtlePath.push( [ turtlePath[s][0], turtlePath[s - 1][1] ] )
+ } else if ( addPoint ) {
+ fixedTurtlePath.push( [ turtlePath[s - 1][0], turtlePath[s][1] ] )
+ }
+
+ fixedTurtlePath.push( turtlePath[s] )
+
+ }
+
+ // printjson( fixedTurtlePath )
+
+ turtlePaths.push( fixedTurtlePath )
+
+ }
+
+ // Uncomment to print polygon shape
+ // print( grid.toString() )
+
+ var polygon = []
+ for ( var t = 0; t < turtlePaths.length; t++ ) {
+ for ( var s = 0; s < turtlePaths[t].length; s++ ) {
+ polygon.push( rotatePoint( turtlePaths[t][s] ) )
+ }
+ }
+
+ // Uncomment to print out polygon
+ // printjson( polygon )
+
+ t = db.polytest2
+ t.drop()
+
+ // Test single and multi-location documents
+ var pointsIn = 0
+ var pointsOut = 0
+ var allPointsIn = []
+ var allPointsOut = []
+
+ for ( var j = grid[0].length - 1; j >= 0; j-- ) {
+ for ( var i = 0; i < grid.length; i++ ) {
+
+ var point = rotatePoint( [ i + 0.5, j + 0.5 ] )
+
+ t.insert( { loc : point } )
+ if ( grid[i][j] != undefined ){
+ allPointsIn.push( point )
+ pointsIn++
+ }
+ else{
+ allPointsOut.push( point )
+ pointsOut++
+ }
+ }
+ }
+
+ t.ensureIndex( { loc : "2d" }, { bits : 1 + bits, max : bounds[1], min : bounds[0] } )
+ assert.isnull( db.getLastError() )
+
+ t.insert( { loc : allPointsIn } )
+ t.insert( { loc : allPointsOut } )
+ allPoints = allPointsIn.concat( allPointsOut )
+ t.insert( { loc : allPoints } )
+
+ print( "Points : " )
+ printjson( { pointsIn : pointsIn, pointsOut : pointsOut } )
+ //print( t.find( { loc : { "$within" : { "$polygon" : polygon } } } ).count() )
+
+ assert.eq( gridSize[0] * gridSize[1] + 3, t.find().count() )
+ assert.eq( 2 + pointsIn, t.find( { loc : { "$within" : { "$polygon" : polygon } } } ).count() );
+
+}
diff --git a/jstests/geo_polygon3.js b/jstests/geo_polygon3.js
new file mode 100644
index 0000000..9fdff1a
--- /dev/null
+++ b/jstests/geo_polygon3.js
@@ -0,0 +1,54 @@
+//
+// Tests for polygon querying with varying levels of accuracy
+//
+
+var numTests = 31;
+
+for( var n = 0; n < numTests; n++ ){
+
+ t = db.geo_polygon3;
+ t.drop();
+
+ num = 0;
+ for ( x=1; x < 9; x++ ){
+ for ( y= 1; y < 9; y++ ){
+ o = { _id : num++ , loc : [ x , y ] };
+ t.save( o );
+ }
+ }
+
+ t.ensureIndex( { loc : "2d" }, { bits : 2 + n } );
+
+ triangle = [[0,0], [1,1], [0,2]];
+
+ // Look at only a small slice of the data within a triangle
+ assert.eq( 1 , t.find( { loc: { "$within": { "$polygon" : triangle }}} ).count() , "Triangle Test" );
+
+
+ boxBounds = [ [0,0], [0,10], [10,10], [10,0] ];
+
+ assert.eq( num , t.find( { loc : { "$within" : { "$polygon" : boxBounds } } } ).count() , "Bounding Box Test" );
+
+ // Look in a box much bigger than the one we have data in
+ boxBounds = [[-100,-100], [-100, 100], [100,100], [100,-100]];
+ assert.eq( num , t.find( { loc : { "$within" : { "$polygon" : boxBounds } } } ).count() , "Big Bounding Box Test" );
+
+ t.drop();
+
+ pacman = [
+ [0,2], [0,4], [2,6], [4,6], // Head
+ [6,4], [4,3], [6,2], // Mouth
+ [4,0], [2,0] // Bottom
+ ];
+
+ t.save({loc: [1,3] }); // Add a point that's in
+ t.ensureIndex( { loc : "2d" }, { bits : 2 + t } );
+
+ assert.eq( 1 , t.find({loc : { $within : { $polygon : pacman }}} ).count() , "Pacman single point" );
+
+ t.save({ loc : [5, 3] }) // Add a point that's out right in the mouth opening
+ t.save({ loc : [3, 7] }) // Add a point above the center of the head
+ t.save({ loc : [3,-1] }) // Add a point below the center of the bottom
+
+ assert.eq( 1 , t.find({loc : { $within : { $polygon : pacman }}} ).count() , "Pacman double point" );
+}
diff --git a/jstests/geo_regex0.js b/jstests/geo_regex0.js
new file mode 100644
index 0000000..79042b9
--- /dev/null
+++ b/jstests/geo_regex0.js
@@ -0,0 +1,18 @@
+// From SERVER-2247
+// Tests to make sure regex works with geo indices
+
+t = db.regex0
+t.drop()
+
+t.ensureIndex( { point : '2d', words : 1 } )
+t.insert( { point : [ 1, 1 ], words : [ 'foo', 'bar' ] } )
+
+regex = { words : /^f/ }
+geo = { point : { $near : [ 1, 1 ] } }
+both = { point : { $near : [ 1, 1 ] }, words : /^f/ }
+
+assert.eq(1, t.find( regex ).count() )
+assert.eq(1, t.find( geo ).count() )
+assert.eq(1, t.find( both ).count() )
+
+
diff --git a/jstests/geo_small_large.js b/jstests/geo_small_large.js
new file mode 100644
index 0000000..aff4743
--- /dev/null
+++ b/jstests/geo_small_large.js
@@ -0,0 +1,151 @@
+// SERVER-2386, general geo-indexing using very large and very small bounds
+
+load( "jstests/libs/geo_near_random.js" );
+
+// Do some random tests (for near queries) with very large and small ranges
+
+var test = new GeoNearRandomTest( "geo_small_large" );
+
+bounds = { min : -Math.pow( 2, 34 ), max : Math.pow( 2, 34 ) };
+
+test.insertPts( 50, bounds );
+
+printjson( db["geo_small_large"].find().limit( 10 ).toArray() )
+
+test.testPt( [ 0, 0 ] );
+test.testPt( test.mkPt( undefined, bounds ) );
+test.testPt( test.mkPt( undefined, bounds ) );
+test.testPt( test.mkPt( undefined, bounds ) );
+test.testPt( test.mkPt( undefined, bounds ) );
+
+test = new GeoNearRandomTest( "geo_small_large" );
+
+bounds = { min : -Math.pow( 2, -34 ), max : Math.pow( 2, -34 ) };
+
+test.insertPts( 50, bounds );
+
+printjson( db["geo_small_large"].find().limit( 10 ).toArray() )
+
+test.testPt( [ 0, 0 ] );
+test.testPt( test.mkPt( undefined, bounds ) );
+test.testPt( test.mkPt( undefined, bounds ) );
+test.testPt( test.mkPt( undefined, bounds ) );
+test.testPt( test.mkPt( undefined, bounds ) );
+
+
+// Check that our box and circle queries also work
+var scales = [ Math.pow( 2, 40 ), Math.pow( 2, -40 ), Math.pow(2, 2), Math.pow(3, -15), Math.pow(3, 15) ]
+
+for ( var i = 0; i < scales.length; i++ ) {
+
+ scale = scales[i];
+
+ var eps = Math.pow( 2, -7 ) * scale;
+ var radius = 5 * scale;
+ var max = 10 * scale;
+ var min = -max;
+ var range = max - min;
+ var bits = 2 + Math.random() * 30
+
+ var t = db["geo_small_large"]
+ t.drop();
+ t.ensureIndex( { p : "2d" }, { min : min, max : max, bits : bits })
+
+ var outPoints = 0;
+ var inPoints = 0;
+
+ printjson({ eps : eps, radius : radius, max : max, min : min, range : range, bits : bits })
+
+ // Put a point slightly inside and outside our range
+ for ( var j = 0; j < 2; j++ ) {
+ var currRad = ( j % 2 == 0 ? radius + eps : radius - eps );
+ t.insert( { p : { x : currRad, y : 0 } } );
+ print( db.getLastError() )
+ }
+
+ printjson( t.find().toArray() );
+
+ assert.eq( t.count( { p : { $within : { $center : [[0, 0], radius ] } } } ), 1, "Incorrect center points found!" )
+ assert.eq( t.count( { p : { $within : { $box : [ [ -radius, -radius ], [ radius, radius ] ] } } } ), 1,
+ "Incorrect box points found!" )
+
+ shouldFind = []
+ randoms = []
+
+ for ( var j = 0; j < 2; j++ ) {
+
+ var randX = Math.random(); // randoms[j].randX
+ var randY = Math.random(); // randoms[j].randY
+
+ randoms.push({ randX : randX, randY : randY })
+
+ var x = randX * ( range - eps ) + eps + min;
+ var y = randY * ( range - eps ) + eps + min;
+
+ t.insert( { p : [ x, y ] } );
+
+ if ( x * x + y * y > radius * radius ){
+ // print( "out point ");
+ // printjson({ x : x, y : y })
+ outPoints++
+ }
+ else{
+ // print( "in point ");
+ // printjson({ x : x, y : y })
+ inPoints++
+ shouldFind.push({ x : x, y : y, radius : Math.sqrt( x * x + y * y ) })
+ }
+ }
+
+ /*
+ function printDiff( didFind, shouldFind ){
+
+ for( var i = 0; i < shouldFind.length; i++ ){
+ var beenFound = false;
+ for( var j = 0; j < didFind.length && !beenFound ; j++ ){
+ beenFound = shouldFind[i].x == didFind[j].x &&
+ shouldFind[i].y == didFind[j].y
+ }
+
+ if( !beenFound ){
+ print( "Could not find: " )
+ shouldFind[i].inRadius = ( radius - shouldFind[i].radius >= 0 )
+ printjson( shouldFind[i] )
+ }
+ }
+ }
+
+ print( "Finding random pts... ")
+ var found = t.find( { p : { $within : { $center : [[0, 0], radius ] } } } ).toArray()
+ var didFind = []
+ for( var f = 0; f < found.length; f++ ){
+ //printjson( found[f] )
+ var x = found[f].p.x != undefined ? found[f].p.x : found[f].p[0]
+ var y = found[f].p.y != undefined ? found[f].p.y : found[f].p[1]
+ didFind.push({ x : x, y : y, radius : Math.sqrt( x * x + y * y ) })
+ }
+
+ print( "Did not find but should: ")
+ printDiff( didFind, shouldFind )
+ print( "Found but should not have: ")
+ printDiff( shouldFind, didFind )
+ */
+
+ assert.eq( t.count( { p : { $within : { $center : [[0, 0], radius ] } } } ), 1 + inPoints,
+ "Incorrect random center points found!\n" + tojson( randoms ) )
+
+ print("Found " + inPoints + " points in and " + outPoints + " points out.");
+
+ var found = t.find( { p : { $near : [0, 0], $maxDistance : radius } } ).toArray()
+ var dist = 0;
+ for( var f = 0; f < found.length; f++ ){
+ var x = found[f].p.x != undefined ? found[f].p.x : found[f].p[0]
+ var y = found[f].p.y != undefined ? found[f].p.y : found[f].p[1]
+ print( "Dist: x : " + x + " y : " + y + " dist : " + Math.sqrt( x * x + y * y) + " radius : " + radius )
+ }
+
+ assert.eq( t.count( { p : { $near : [0, 0], $maxDistance : radius } } ), 1 + inPoints,
+ "Incorrect random center points found near!\n" + tojson( randoms ) )
+
+}
+
diff --git a/jstests/geo_uniqueDocs.js b/jstests/geo_uniqueDocs.js
new file mode 100644
index 0000000..b77a3b4
--- /dev/null
+++ b/jstests/geo_uniqueDocs.js
@@ -0,0 +1,38 @@
+// Test uniqueDocs option for $within and geoNear queries SERVER-3139
+
+collName = 'geo_uniqueDocs_test'
+t = db.geo_uniqueDocs_test
+t.drop()
+
+t.save( { locs : [ [0,2], [3,4]] } )
+t.save( { locs : [ [6,8], [10,10] ] } )
+
+t.ensureIndex( { locs : '2d' } )
+
+// geoNear tests
+assert.eq(4, db.runCommand({geoNear:collName, near:[0,0]}).results.length)
+assert.eq(4, db.runCommand({geoNear:collName, near:[0,0], uniqueDocs:false}).results.length)
+assert.eq(2, db.runCommand({geoNear:collName, near:[0,0], uniqueDocs:true}).results.length)
+results = db.runCommand({geoNear:collName, near:[0,0], num:2}).results
+assert.eq(2, results.length)
+assert.eq(2, results[0].dis)
+assert.eq(5, results[1].dis)
+results = db.runCommand({geoNear:collName, near:[0,0], num:2, uniqueDocs:true}).results
+assert.eq(2, results.length)
+assert.eq(2, results[0].dis)
+assert.eq(10, results[1].dis)
+
+// $within tests
+
+assert.eq(2, t.find( {locs: {$within: {$box : [[0,0],[9,9]]}}}).count())
+assert.eq(2, t.find( {locs: {$within: {$box : [[0,0],[9,9]], $uniqueDocs : true}}}).count())
+assert.eq(3, t.find( {locs: {$within: {$box : [[0,0],[9,9]], $uniqueDocs : false}}}).count())
+
+assert.eq(2, t.find( {locs: {$within: {$center : [[5,5],7], $uniqueDocs : true}}}).count())
+assert.eq(3, t.find( {locs: {$within: {$center : [[5,5],7], $uniqueDocs : false}}}).count())
+
+assert.eq(2, t.find( {locs: {$within: {$centerSphere : [[5,5],1], $uniqueDocs : true}}}).count())
+assert.eq(4, t.find( {locs: {$within: {$centerSphere : [[5,5],1], $uniqueDocs : false}}}).count())
+
+assert.eq(2, t.find( {locs: {$within: {$polygon : [[0,0],[0,9],[9,9]], $uniqueDocs : true}}}).count())
+assert.eq(3, t.find( {locs: {$within: {$polygon : [[0,0],[0,9],[9,9]], $uniqueDocs : false}}}).count())
diff --git a/jstests/getlog1.js b/jstests/getlog1.js
new file mode 100644
index 0000000..75fbeab
--- /dev/null
+++ b/jstests/getlog1.js
@@ -0,0 +1,24 @@
+// to run:
+// ./mongo jstests/<this-file>
+
+contains = function(arr,obj) {
+ var i = arr.length;
+ while (i--) {
+ if (arr[i] === obj) {
+ return true;
+ }
+ }
+ return false;
+}
+
+var resp = db.adminCommand({getLog:"*"})
+assert( resp.ok == 1, "error executing getLog command" );
+assert( resp.names, "no names field" );
+assert( resp.names.length > 0, "names array is empty" );
+assert( contains(resp.names,"global") , "missing global category" );
+assert( !contains(resp.names,"butty") , "unexpected butty category" );
+
+resp = db.adminCommand({getLog:"global"})
+assert( resp.ok == 1, "error executing getLog command" );
+assert( resp.log, "no log field" );
+assert( resp.log.length > 0 , "no log lines" );
diff --git a/jstests/group7.js b/jstests/group7.js
new file mode 100644
index 0000000..5bf9232
--- /dev/null
+++ b/jstests/group7.js
@@ -0,0 +1,43 @@
+// Test yielding group command SERVER-1395
+
+t = db.jstests_group7;
+t.drop();
+
+function checkForYield( docs, updates ) {
+ t.drop();
+ a = 0;
+ for( var i = 0; i < docs; ++i ) {
+ t.save( {a:a} );
+ }
+ db.getLastError();
+
+ // Iteratively update all a values atomically.
+ p = startParallelShell( 'for( a = 0; a < ' + updates + '; ++a ) { db.jstests_group7.update( {$atomic:true}, {$set:{a:a}}, false, true ); db.getLastError(); }' );
+
+ for( var i = 0; i < updates; ++i ) {
+ ret = t.group({key:{a:1},reduce:function(){},initial:{}});
+ // Check if group sees more than one a value, indicating that it yielded.
+ if ( ret.length > 1 ) {
+ p();
+ return true;
+ }
+ printjson( ret );
+ }
+
+ p();
+ return false;
+}
+
+var yielded = false;
+var docs = 1500;
+var updates = 50;
+for( var j = 1; j <= 6; ++j ) {
+ if ( checkForYield( docs, updates ) ) {
+ yielded = true;
+ break;
+ }
+ // Increase docs and updates to encourage yielding.
+ docs *= 2;
+ updates *= 2;
+}
+assert( yielded ); \ No newline at end of file
diff --git a/jstests/hint1.js b/jstests/hint1.js
index 63a5fa6..b5a580f 100644
--- a/jstests/hint1.js
+++ b/jstests/hint1.js
@@ -5,6 +5,12 @@ p.drop();
p.save( { ts: new Date( 1 ), cls: "entry", verticals: "alleyinsider", live: true } );
p.ensureIndex( { ts: 1 } );
-e = p.find( { live: true, ts: { $lt: new Date( 1234119308272 ) }, cls: "entry", verticals: " alleyinsider" } ).sort( { ts: -1 } ).hint( { ts: 1 } ).explain();
-assert.eq( e.indexBounds.ts[0][0].getTime(), new Date( 1234119308272 ).getTime() , "A" );
-assert.eq( 0 , e.indexBounds.ts[0][1].getTime() , "B" );
+e = p.find( { live: true, ts: { $lt: new Date( 1234119308272 ) }, cls: "entry", verticals: "alleyinsider" } ).sort( { ts: -1 } ).hint( { ts: 1 } ).explain();
+assert.eq(e.indexBounds.ts[0][0].getTime(), new Date(1234119308272).getTime(), "A");
+
+//printjson(e);
+
+assert.eq( /*just below min date is bool true*/true, e.indexBounds.ts[0][1], "B");
+
+assert.eq(1, p.find({ live: true, ts: { $lt: new Date(1234119308272) }, cls: "entry", verticals: "alleyinsider" }).sort({ ts: -1 }).hint({ ts: 1 }).count());
+
diff --git a/jstests/idhack.js b/jstests/idhack.js
new file mode 100644
index 0000000..9614ebc
--- /dev/null
+++ b/jstests/idhack.js
@@ -0,0 +1,23 @@
+
+t = db.idhack
+t.drop()
+
+
+t.insert( { _id : { x : 1 } , z : 1 } )
+t.insert( { _id : { x : 2 } , z : 2 } )
+t.insert( { _id : { x : 3 } , z : 3 } )
+t.insert( { _id : 1 , z : 4 } )
+t.insert( { _id : 2 , z : 5 } )
+t.insert( { _id : 3 , z : 6 } )
+
+assert.eq( 2 , t.findOne( { _id : { x : 2 } } ).z , "A1" )
+assert.eq( 2 , t.find( { _id : { $gte : 2 } } ).count() , "A2" )
+assert.eq( 2 , t.find( { _id : { $gte : 2 } } ).itcount() , "A3" )
+
+t.update( { _id : { x : 2 } } , { $set : { z : 7 } } )
+assert.eq( 7 , t.findOne( { _id : { x : 2 } } ).z , "B1" )
+
+t.update( { _id : { $gte : 2 } } , { $set : { z : 8 } } , false , true )
+assert.eq( 4 , t.findOne( { _id : 1 } ).z , "C1" )
+assert.eq( 8 , t.findOne( { _id : 2 } ).z , "C2" )
+assert.eq( 8 , t.findOne( { _id : 3 } ).z , "C3" )
diff --git a/jstests/in8.js b/jstests/in8.js
new file mode 100644
index 0000000..5e7e587
--- /dev/null
+++ b/jstests/in8.js
@@ -0,0 +1,23 @@
+// SERVER-2829 Test arrays matching themselves within a $in expression.
+
+t = db.jstests_in8;
+t.drop();
+
+t.save( {key: [1]} );
+t.save( {key: ['1']} );
+t.save( {key: [[2]]} );
+
+function doTest() {
+ assert.eq( 1, t.count( {key:[1]} ) );
+ assert.eq( 1, t.count( {key:{$in:[[1]]}} ) );
+ assert.eq( 1, t.count( {key:{$in:[[1]],$ne:[2]}} ) );
+ assert.eq( 1, t.count( {key:{$in:[['1']],$type:2}} ) );
+ assert.eq( 1, t.count( {key:['1']} ) );
+ assert.eq( 1, t.count( {key:{$in:[['1']]}} ) );
+ assert.eq( 1, t.count( {key:[2]} ) );
+ assert.eq( 1, t.count( {key:{$in:[[2]]}} ) );
+}
+
+doTest();
+t.ensureIndex( {key:1} );
+doTest();
diff --git a/jstests/in9.js b/jstests/in9.js
new file mode 100644
index 0000000..34cefb8
--- /dev/null
+++ b/jstests/in9.js
@@ -0,0 +1,35 @@
+// SERVER-2343 Test $in empty array matching.
+
+t = db.jstests_in9;
+t.drop();
+
+function someData() {
+ t.remove();
+ t.save( {key: []} );
+}
+
+function moreData() {
+ someData();
+ t.save( {key: [1]} );
+ t.save( {key: ['1']} );
+ t.save( {key: null} );
+ t.save( {} );
+}
+
+function check() {
+ assert.eq( 1, t.count( {key:[]} ) );
+ assert.eq( 1, t.count( {key:{$in:[[]]}} ) );
+}
+
+function doTest() {
+ someData();
+ check();
+ moreData();
+ check();
+}
+
+doTest();
+
+// SERVER-1943 not fixed yet
+t.ensureIndex( {key:1} );
+doTest();
diff --git a/jstests/ina.js b/jstests/ina.js
new file mode 100644
index 0000000..cf614ab
--- /dev/null
+++ b/jstests/ina.js
@@ -0,0 +1,15 @@
+// Uassert when $elemMatch is attempted within $in SERVER-3545
+
+t = db.jstests_ina;
+t.drop();
+t.save( {} );
+
+assert.throws( function() { t.find( {a:{$in:[{$elemMatch:{b:1}}]}} ).itcount(); } );
+assert.throws( function() { t.find( {a:{$not:{$in:[{$elemMatch:{b:1}}]}}} ).itcount(); } );
+
+assert.throws( function() { t.find( {a:{$nin:[{$elemMatch:{b:1}}]}} ).itcount(); } );
+assert.throws( function() { t.find( {a:{$not:{$nin:[{$elemMatch:{b:1}}]}}} ).itcount(); } );
+
+// NOTE Above we don't check cases like {b:2,$elemMatch:{b:3,4}} - generally
+// we assume that the first key is $elemMatch if any key is, and validating
+// every key is expensive in some cases. \ No newline at end of file
diff --git a/jstests/index11.js b/jstests/index11.js
index 2a552dd..0f6aa33 100644
--- a/jstests/index11.js
+++ b/jstests/index11.js
@@ -1,13 +1,29 @@
// Reindex w/ field too large to index
coll = db.jstests_index11;
-coll.drop();
+coll.drop();
+
+var str = "xxxxxxxxxxxxxxxx";
+str = str + str;
+str = str + str;
+str = str + str;
+str = str + str;
+str = str + str;
+str = str + str;
+str = str + str;
+str = str + str;
+str = str + 'q';
+
+coll.insert({ k: 'a', v: str });
+
+assert.eq(0, coll.find({ "k": "x" }).count(), "expected zero keys 1");
-coll.ensureIndex({"k": 1, "v": 1});
-coll.insert({k: "x", v: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"});
-assert.eq(0, coll.find({"k": "x"}).count()); // SERVER-1716
+coll.ensureIndex({"k": 1, "v": 1});
+coll.insert({ k: "x", v: str });
-coll.dropIndexes();
-coll.ensureIndex({"k": 1, "v": 1});
+assert.eq(0, coll.find({"k": "x"}).count(), "B"); // SERVER-1716
-assert.eq(0, coll.find({"k": "x"}).count());
+coll.dropIndexes();
+coll.ensureIndex({"k": 1, "v": 1});
+
+assert.eq(0, coll.find({ "k": "x" }).count(), "expected zero keys 2");
diff --git a/jstests/index9.js b/jstests/index9.js
index c832783..04b9009 100644
--- a/jstests/index9.js
+++ b/jstests/index9.js
@@ -1,7 +1,15 @@
t = db.jstests_index9;
t.drop();
+db.createCollection( "jstests_index9" );
+assert.eq( 1, db.system.indexes.count( {ns: "test.jstests_index9"} ), "There should be 1 index with default collection" );
+t.drop();
+db.createCollection( "jstests_index9", {autoIndexId: true} );
+assert.eq( 1, db.system.indexes.count( {ns: "test.jstests_index9"} ), "There should be 1 index if autoIndexId: true" );
+
+t.drop();
db.createCollection( "jstests_index9", {autoIndexId:false} );
+assert.eq( 0, db.system.indexes.count( {ns: "test.jstests_index9"} ), "There should be 0 index if autoIndexId: false" );
t.createIndex( { _id:1 } );
assert.eq( 1, db.system.indexes.count( {ns: "test.jstests_index9"} ) );
t.createIndex( { _id:1 } );
diff --git a/jstests/index_big1.js b/jstests/index_big1.js
new file mode 100644
index 0000000..61260a3
--- /dev/null
+++ b/jstests/index_big1.js
@@ -0,0 +1,39 @@
+// check where "key too big" happens
+
+t = db.index_big1;
+
+N = 3200;
+t.drop();
+
+var s = "";
+
+for ( i=0; i<N; i++ ) {
+
+ t.insert( { a : i + .5 , x : s } )
+
+ s += "x";
+}
+
+t.ensureIndex( { a : 1 , x : 1 } )
+
+assert.eq( 2 , t.getIndexes().length );
+
+flip = -1;
+
+for ( i=0; i<N; i++ ) {
+ var c = t.find( { a : i + .5 } ).count();
+ if ( c == 1 ) {
+ assert.eq( -1 , flip , "flipping : " + i );
+ }
+ else {
+ if ( flip == -1 ) {
+ // print( "state flipped at: " + i );
+ flip = i;
+ }
+ }
+}
+
+//print(flip);
+//print(flip/1024);
+
+assert.eq( /*v0 index : 797*/1002, flip , "flip changed" );
diff --git a/jstests/index_bigkeys.js b/jstests/index_bigkeys.js
new file mode 100755
index 0000000..dfb05ad
--- /dev/null
+++ b/jstests/index_bigkeys.js
@@ -0,0 +1,78 @@
+
+t = db.bigkeysidxtest;
+
+var keys = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
+
+var str = "aaaabbbbccccddddeeeeffffgggghhhh";
+str = str + str;
+
+for (var i = 2; i < 10; i++) {
+ keys[i] = str;
+ str = str + str;
+}
+print(str.length);
+
+var dir = 1;
+
+function go() {
+ if (dir == 1) {
+ for (var i = 1; i < 10; i++) {
+ t.insert({ _id: i, k: keys[i] });
+ }
+ }
+ else {
+ for (var i = 10; i >= 1; i--) {
+ t.insert({ _id: i, k: keys[i] });
+ }
+ }
+}
+
+var expect = null;
+
+var ok = true;
+
+function check() {
+ assert(t.validate().valid);
+
+ var c = t.find({ k: /^a/ }).count();
+
+ print("keycount:" + c);
+
+ if (expect) {
+ if (expect != c) {
+ print("count of keys doesn't match expected count of : " + expect + " got: " + c);
+ ok = false;
+ }
+ }
+ else {
+ expect = c;
+ }
+
+ //print(t.validate().result);
+}
+
+for (var pass = 1; pass <= 2; pass++) {
+ print("pass:" + pass);
+
+ t.drop();
+ t.ensureIndex({ k: 1 });
+ go();
+ check(); // check incremental addition
+
+ t.reIndex();
+ check(); // check bottom up
+
+ t.drop();
+ go();
+ t.ensureIndex({ k: 1 });
+ check(); // check bottom up again without reindex explicitly
+
+ t.drop();
+ go();
+ t.ensureIndex({ k: 1 }, { background: true });
+ check(); // check background (which should be incremental)
+
+ dir = -1;
+}
+
+assert(ok,"not ok");
diff --git a/jstests/index_check5.js b/jstests/index_check5.js
index 90ac301..eabb929 100644
--- a/jstests/index_check5.js
+++ b/jstests/index_check5.js
@@ -14,4 +14,4 @@ t.save( { "name" : "Player2" ,
assert.eq( 2 , t.find( { "scores.level": 2, "scores.score": {$gt:30} } ).itcount() , "A" );
t.ensureIndex( { "scores.level" : 1 , "scores.score" : 1 } );
-assert.eq( 1 , t.find( { "scores.level": 2, "scores.score": {$gt:30} } ).itcount() , "B" );
+assert.eq( 2 , t.find( { "scores.level": 2, "scores.score": {$gt:30} } ).itcount() , "B" );
diff --git a/jstests/index_check8.js b/jstests/index_check8.js
index bc267df..1964ecb 100644
--- a/jstests/index_check8.js
+++ b/jstests/index_check8.js
@@ -4,12 +4,18 @@ t.drop();
t.insert( { a : 1 , b : 1 , c : 1 , d : 1 , e : 1 } )
t.ensureIndex( { a : 1 , b : 1 , c : 1 } )
-t.ensureIndex( { a : 1 , b : 1 , d : 1 , e : 1 } )
+t.ensureIndex({ a: 1, b: 1, d: 1, e: 1 })
+
+// this block could be added to many tests in theory...
+if ((new Date()) % 10 == 0) {
+ var coll = t.toString().substring(db.toString().length + 1);
+ print("compacting " + coll + " before continuing testing");
+ // don't check return code - false for mongos
+ print("ok: " + db.runCommand({ compact: coll, dev: true }));
+}
x = t.find( { a : 1 , b : 1 , d : 1 } ).sort( { e : 1 } ).explain()
assert( ! x.scanAndOrder , "A : " + tojson( x ) )
x = t.find( { a : 1 , b : 1 , c : 1 , d : 1 } ).sort( { e : 1 } ).explain()
//assert( ! x.scanAndOrder , "B : " + tojson( x ) )
-
-
diff --git a/jstests/index_fornew.js b/jstests/index_fornew.js
deleted file mode 100644
index 6c3c158..0000000
--- a/jstests/index_fornew.js
+++ /dev/null
@@ -1,13 +0,0 @@
-
-t = db.index_fornew;
-t.drop();
-
-t.insert( { x : 1 } )
-t.ensureIndex( { x : 1 } , { v : 1 } )
-assert.eq( 1 , t.getIndexes()[1].v , tojson( t.getIndexes() ) );
-
-assert.throws( function(){ t.findOne( { x : 1 } ); } )
-
-t.reIndex();
-assert.eq( 0 , t.getIndexes()[1].v , tojson( t.getIndexes() ) );
-assert( t.findOne( { x : 1 } ) );
diff --git a/jstests/index_maxkey.js b/jstests/index_maxkey.js
new file mode 100644
index 0000000..eba8126
--- /dev/null
+++ b/jstests/index_maxkey.js
@@ -0,0 +1,27 @@
+
+t = db.index_maxkey;
+
+for ( var indexVersion=0; indexVersion<=1; indexVersion++ ) {
+ t.drop();
+
+ s = "";
+
+ t.ensureIndex( { s : 1 } , { v : indexVersion } );
+ while ( true ) {
+ t.insert( { s : s } );
+ if ( t.find().count() == t.find().sort( { s : 1 } ).itcount() ) {
+ s += ".....";
+ continue;
+ }
+ var sz = Object.bsonsize( { s : s } ) - 2;
+ print( "indexVersion: " + indexVersion + " max key is : " + sz );
+ if ( indexVersion == 0 ) {
+ assert.eq( 821 , sz );
+ }
+ else if ( indexVersion == 1 ) {
+ assert.eq( 1026 , sz );
+ }
+ break;
+ }
+
+}
diff --git a/jstests/indexbindata.js b/jstests/indexbindata.js
new file mode 100755
index 0000000..e69de29
--- /dev/null
+++ b/jstests/indexbindata.js
diff --git a/jstests/indexk.js b/jstests/indexk.js
new file mode 100644
index 0000000..7cef95a
--- /dev/null
+++ b/jstests/indexk.js
@@ -0,0 +1,58 @@
+// Check correct result set when bounds each match different multikeys SERVER-958
+
+t = db.jstests_indexk;
+t.drop();
+
+t.insert({a:[1,10]});
+
+assert.eq( 1, t.count({a: {$gt:2, $lt:5}}) );
+assert.eq( 1, t.count({a: {$gt:2}}) );
+assert.eq( 1, t.count({a: {$lt:5}}) );
+
+assert.eq( 1, t.count({a: {$gt:5, $lt:2}}) );
+assert.eq( 1, t.count({a: {$gt:5}}) );
+assert.eq( 1, t.count({a: {$lt:2}}) );
+
+t.ensureIndex({a:1});
+
+// Check that only one constraint limits the index range for a multikey index.
+// The constraint used is arbitrary, but testing current behavior here.
+
+assert.eq( 1, t.count({a: {$gt: 2, $lt:5}}) );
+e = t.find({a: {$gt: 2, $lt:5}}).explain();
+assert.eq( 1, e.nscanned );
+assert.eq( 1, e.n );
+assert.eq( 2, e.indexBounds.a[ 0 ][ 0 ] );
+// Check that upper bound is large ( > 5 ).
+assert.lt( 1000, e.indexBounds.a[ 0 ][ 1 ] );
+
+assert.eq( 1, t.count({a: {$lt: 5, $gt:2}}) );
+e = t.find({a: {$lt: 5, $gt:2}}).explain();
+assert.eq( 1, e.nscanned );
+assert.eq( 1, e.n );
+// Check that upper bound is low ( < 2 ).
+assert.gt( -1000, e.indexBounds.a[ 0 ][ 0 ] );
+assert.eq( 5, e.indexBounds.a[ 0 ][ 1 ] );
+
+// Now check cases where no match is possible with a single key index.
+
+assert.eq( 1, t.count({a: {$gt: 5, $lt:2}}) );
+e = t.find({a: {$gt: 5, $lt:2}}).explain();
+assert.eq( 1, e.nscanned );
+assert.eq( 1, e.n );
+assert.eq( 5, e.indexBounds.a[ 0 ][ 0 ] );
+// Check that upper bound is low ( < 2 ).
+assert.lt( 1000, e.indexBounds.a[ 0 ][ 1 ] );
+
+assert.eq( 1, t.count({a: {$lt: 2, $gt:5}}) );
+e = t.find({a: {$lt: 2, $gt:5}}).explain();
+assert.eq( 1, e.nscanned );
+assert.eq( 1, e.n );
+// Check that upper bound is large ( > 5 ).
+assert.gt( -1000, e.indexBounds.a[ 0 ][ 0 ] );
+assert.eq( 2, e.indexBounds.a[ 0 ][ 1 ] );
+
+assert.eq( 1, t.count({a: {$gt: 2}}) );
+assert.eq( 1, t.count({a: {$lt: 5}}) );
+
+// Check good performance of single key index \ No newline at end of file
diff --git a/jstests/indexl.js b/jstests/indexl.js
new file mode 100644
index 0000000..666586d
--- /dev/null
+++ b/jstests/indexl.js
@@ -0,0 +1,27 @@
+// Check nonoverlapping $in/$all with multikeys SERVER-2165
+
+t = db.jstests_indexl;
+
+function test(t) {
+ t.save( {a:[1,2]} );
+ assert.eq( 1, t.count( {a:{$all:[1],$in:[2]}} ) );
+ assert.eq( 1, t.count( {a:{$all:[2],$in:[1]}} ) );
+ assert.eq( 1, t.count( {a:{$in:[2],$all:[1]}} ) );
+ assert.eq( 1, t.count( {a:{$in:[1],$all:[2]}} ) );
+ assert.eq( 1, t.count( {a:{$all:[1],$in:[2]}} ) );
+ t.save({a:[3,4]})
+ t.save({a:[2,3]})
+ t.save({a:[1,2,3,4]})
+ assert.eq( 2, t.count( {a:{$in:[2],$all:[1]}} ) );
+ assert.eq( 1, t.count( {a:{$in:[3],$all:[1,2]}} ) );
+ assert.eq( 1, t.count( {a:{$in:[1],$all:[3]}} ) );
+ assert.eq( 2, t.count( {a:{$in:[2,3],$all:[1]}} ) );
+ assert.eq( 1, t.count( {a:{$in:[4],$all:[2,3]}} ) );
+ assert.eq( 3, t.count( {a:{$in:[1,3],$all:[2]}} ) );
+}
+
+t.drop();
+test(t);
+t.drop();
+t.ensureIndex( {a:1} );
+test(t); \ No newline at end of file
diff --git a/jstests/indexm.js b/jstests/indexm.js
new file mode 100644
index 0000000..6b31ea6
--- /dev/null
+++ b/jstests/indexm.js
@@ -0,0 +1,38 @@
+// Check proper range combinations with or clauses overlapping non or portion of query SERVER-2302
+
+t = db.jstests_indexm;
+t.drop();
+
+t.save( { a : [ { x : 1 } , { x : 2 } , { x : 3 } , { x : 4 } ] } )
+
+function test(){
+ assert.eq( 1, t.count(
+ {
+ a : { x : 1 } ,
+ "$or" : [ { a : { x : 2 } } , { a : { x : 3 } } ]
+ }
+ ) );
+}
+
+// The first find will return a result since there isn't an index.
+test();
+
+// Now create an index.
+t.ensureIndex({"a":1});
+test();
+// SERVER-3105
+//assert( !t.find(
+// {
+// a : { x : 1 } ,
+// "$or" : [ { a : { x : 2 } } , { a : { x : 3 } } ]
+// }
+// ).explain().clauses );
+
+// Now create a different index.
+t.dropIndexes();
+t.ensureIndex({"a.x":1});
+test();
+
+// Drop the indexes.
+t.dropIndexes();
+test(); \ No newline at end of file
diff --git a/jstests/indexn.js b/jstests/indexn.js
new file mode 100644
index 0000000..d5800e4
--- /dev/null
+++ b/jstests/indexn.js
@@ -0,0 +1,41 @@
+// Check fast detection of empty result set with a single key index SERVER-958.
+
+t = db.jstests_indexn;
+t.drop();
+
+function checkImpossibleMatchDummyCursor( explain ) {
+ assert.eq( 'BasicCursor', explain.cursor );
+ assert.eq( 0, explain.nscanned );
+ assert.eq( 0, explain.n );
+}
+
+t.save( {a:1,b:[1,2]} );
+
+t.ensureIndex( {a:1} );
+t.ensureIndex( {b:1} );
+
+assert.eq( 0, t.count( {a:{$gt:5,$lt:0}} ) );
+// {a:1} is a single key index, so no matches are possible for this query
+checkImpossibleMatchDummyCursor( t.find( {a:{$gt:5,$lt:0}} ).explain() );
+
+assert.eq( 0, t.count( {a:{$gt:5,$lt:0},b:2} ) );
+checkImpossibleMatchDummyCursor( t.find( {a:{$gt:5,$lt:0},b:2} ).explain() );
+
+assert.eq( 0, t.count( {a:{$gt:5,$lt:0},b:{$gt:0,$lt:5}} ) );
+checkImpossibleMatchDummyCursor( t.find( {a:{$gt:5,$lt:0},b:{$gt:0,$lt:5}} ).explain() );
+
+assert.eq( 1, t.count( {$or:[{a:{$gt:5,$lt:0}},{a:1}]} ) );
+checkImpossibleMatchDummyCursor( t.find( {$or:[{a:{$gt:5,$lt:0}},{a:1}]} ).explain().clauses[ 0 ] );
+
+// A following invalid range is eliminated.
+assert.eq( 1, t.count( {$or:[{a:1},{a:{$gt:5,$lt:0}}]} ) );
+assert.eq( null, t.find( {$or:[{a:1},{a:{$gt:5,$lt:0}}]} ).explain().clauses );
+
+t.save( {a:2} );
+
+// An intermediate invalid range is eliminated.
+assert.eq( 2, t.count( {$or:[{a:1},{a:{$gt:5,$lt:0}},{a:2}]} ) );
+explain = t.find( {$or:[{a:1},{a:{$gt:5,$lt:0}},{a:2}]} ).explain();
+assert.eq( 2, explain.clauses.length );
+assert.eq( [[1,1]], explain.clauses[ 0 ].indexBounds.a );
+assert.eq( [[2,2]], explain.clauses[ 1 ].indexBounds.a );
diff --git a/jstests/indexo.js b/jstests/indexo.js
new file mode 100644
index 0000000..e50c099
--- /dev/null
+++ b/jstests/indexo.js
@@ -0,0 +1,32 @@
+// Check that dummy basic cursors work correctly SERVER-958.
+
+t = db.jstests_indexo;
+t.drop();
+
+function checkDummyCursor( explain ) {
+ assert.eq( "BasicCursor", explain.cursor );
+ assert.eq( 0, explain.nscanned );
+ assert.eq( 0, explain.n );
+}
+
+t.save( {a:1} );
+
+t.ensureIndex( {a:1} );
+
+// Match is impossible, so no documents should be scanned.
+checkDummyCursor( t.find( {a:{$gt:5,$lt:0}} ).explain() );
+
+t.drop();
+checkDummyCursor( t.find( {a:1} ).explain() );
+
+t.save( {a:1} );
+t.ensureIndex( {a:1} );
+checkDummyCursor( t.find( {$or:[{a:{$gt:5,$lt:0}},{a:1}]} ).explain().clauses[ 0 ] );
+
+t.drop();
+t.save( {a:5,b:[1,2]} );
+t.ensureIndex( {a:1,b:1} );
+t.ensureIndex( {a:1} );
+// The first clause will use index {a:1,b:1} with the current implementation.
+// The second clause has no valid values for index {a:1} so it will use a dummy cursor.
+checkDummyCursor( t.find( {$or:[{b:{$exists:true},a:{$gt:4}},{a:{$lt:6,$gt:4}}]} ).explain().clauses[ 1 ] );
diff --git a/jstests/indexp.js b/jstests/indexp.js
new file mode 100644
index 0000000..ee511eb
--- /dev/null
+++ b/jstests/indexp.js
@@ -0,0 +1,58 @@
+// Check recording and playback of good query plans with different index types SERVER-958.
+
+t = db.jstests_indexp;
+t.drop();
+
+function expectRecordedPlan( query, idx ) {
+ assert.eq( "BtreeCursor " + idx, t.find( query ).explain( true ).oldPlan.cursor );
+}
+
+function expectNoRecordedPlan( query ) {
+ assert.isnull( t.find( query ).explain( true ).oldPlan );
+}
+
+// Basic test
+t.drop();
+t.ensureIndex( {a:1} );
+t.save( {a:1} );
+t.find( {a:1} ).itcount();
+expectRecordedPlan( {a:1}, "a_1" );
+
+// Index type changes
+t.drop();
+t.ensureIndex( {a:1} );
+t.save( {a:1} );
+t.find( {a:1} ).itcount();
+t.save( {a:[1,2]} );
+expectRecordedPlan( {a:1}, "a_1" );
+
+// Multi key QueryPattern reuses index
+t.drop();
+t.ensureIndex( {a:1} );
+t.save( {a:[1,2]} );
+t.find( {a:{$gt:0}} ).itcount();
+expectRecordedPlan( {a:{$gt:0,$lt:5}}, "a_1" );
+
+// Single key QueryPattern can still be used to find best plan - at least for now.
+t.drop();
+t.ensureIndex( {a:1} );
+t.save( {a:1} );
+t.find( {a:{$gt:0,$lt:5}} ).itcount();
+t.save( {a:[1,2]} );
+expectRecordedPlan( {a:{$gt:0,$lt:5}}, "a_1" );
+
+// Invalid query with only valid fields used
+if ( 0 ) { // SERVER-2864
+t.drop();
+t.ensureIndex( {a:1} );
+t.save( {a:1} );
+t.find( {a:1,b:{$gt:5,$lt:0}} ).itcount();
+expectRecordedPlan( {a:{$gt:0,$lt:5}}, "a_1" );
+}
+
+// Dummy query plan not stored
+t.drop();
+t.ensureIndex( {a:1} );
+t.save( {a:1} );
+t.find( {a:{$gt:5,$lt:0}} ).itcount();
+expectNoRecordedPlan( {a:{$gt:5,$lt:0}} ); \ No newline at end of file
diff --git a/jstests/indexq.js b/jstests/indexq.js
new file mode 100644
index 0000000..f067b3c
--- /dev/null
+++ b/jstests/indexq.js
@@ -0,0 +1,14 @@
+// Test multikey range preference for a fully included range SERVER-958.
+
+t = db.jstests_indexq;
+t.drop();
+
+t.ensureIndex( {a:1} );
+// Single key index
+assert.eq( 5, t.find( {a:{$gt:4,$gte:5}} ).explain().indexBounds.a[ 0 ][ 0 ] );
+assert.eq( [[1,1],[2,2]], t.find( {a:{$in:[1,2,3]},$or:[{a:{$in:[1,2]}}]} ).explain().indexBounds.a );
+
+t.save( {a:[1,3]} );
+// Now with multi key index.
+assert.eq( 5, t.find( {a:{$gt:4,$gte:5}} ).explain().indexBounds.a[ 0 ][ 0 ] );
+assert.eq( [[1,1],[2,2]], t.find( {a:{$in:[1,2,3]},$or:[{a:{$in:[1,2]}}]} ).explain().indexBounds.a );
diff --git a/jstests/indexr.js b/jstests/indexr.js
new file mode 100644
index 0000000..60ecfb1
--- /dev/null
+++ b/jstests/indexr.js
@@ -0,0 +1,47 @@
+// Check multikey index cases with parallel nested fields SERVER-958.
+
+t = db.jstests_indexr;
+t.drop();
+
+// Check without indexes.
+t.save( { a: [ { b: 3, c: 6 }, { b: 1, c: 1 } ] } );
+assert.eq( 1, t.count( { 'a.b':{ $gt:2 }, 'a.c': { $lt:4 } } ) );
+assert.eq( 1, t.count( { a:{ b:3, c:6 }, 'a.c': { $lt:4 } } ) );
+
+// Check with single key indexes.
+t.remove();
+t.ensureIndex( {'a.b':1,'a.c':1} );
+t.ensureIndex( {a:1,'a.c':1} );
+assert.eq( 0, t.count( { 'a.b':{ $gt:2 }, 'a.c': { $lt:4 } } ) );
+assert.eq( 0, t.count( { a:{ b:3, c:6 }, 'a.c': { $lt:4 } } ) );
+assert.eq( 4, t.find( { 'a.b':{ $gt:2 }, 'a.c': { $lt:4 } } ).explain().indexBounds['a.c'][0][1] );
+assert.eq( 4, t.find( { a:{ b:3, c:6 }, 'a.c': { $lt:4 } } ).explain().indexBounds['a.c'][0][1] );
+
+t.save( { a: { b: 3, c: 3 } } );
+assert.eq( 1, t.count( { 'a.b':{ $gt:2 }, 'a.c': { $lt:4 } } ) );
+assert.eq( 1, t.count( { a:{ b:3, c:3 }, 'a.c': { $lt:4 } } ) );
+assert.eq( 4, t.find( { 'a.b':{ $gt:2 }, 'a.c': { $lt:4 } } ).explain().indexBounds['a.c'][0][1] );
+assert.eq( 4, t.find( { a:{ b:3, c:3 }, 'a.c': { $lt:4 } } ).explain().indexBounds['a.c'][0][1] );
+
+// Check with multikey indexes.
+t.remove();
+t.save( { a: [ { b: 3, c: 6 }, { b: 1, c: 1 } ] } );
+
+assert.eq( 1, t.count( { 'a.b':{ $gt:2 }, 'a.c': { $lt:4 } } ) );
+assert.eq( 1, t.count( { a:{ b:3, c:6 }, 'a.c': { $lt:4 } } ) );
+assert.eq( [[{$minElement:1},{$maxElement:1}]], t.find( { 'a.b':{ $gt:2 }, 'a.c': { $lt:4 } } ).explain().indexBounds['a.c'] );
+assert.eq( [[{$minElement:1},{$maxElement:1}]], t.find( { a:{ b:3, c:6 }, 'a.c': { $lt:4 } } ).explain().indexBounds['a.c'] );
+
+// Check reverse direction.
+assert.eq( 1, t.find( { 'a.b':{ $gt:2 }, 'a.c': { $lt:4 } } ).sort( {'a.b':-1} ).itcount() );
+assert.eq( 1, t.find( { a:{ b:3, c:6 }, 'a.c': { $lt:4 } } ).sort( {a:-1} ).itcount() );
+
+assert.eq( [[{$maxElement:1},{$minElement:1}]], t.find( { 'a.b':{ $gt:2 }, 'a.c': { $lt:4 } } ).sort( {'a.b':-1} ).explain().indexBounds['a.c'] );
+assert.eq( [[{$maxElement:1},{$minElement:1}]], t.find( { a:{ b:3, c:6 }, 'a.c': { $lt:4 } } ).sort( {a:-1} ).explain().indexBounds['a.c'] );
+
+// Check second field is constrained if first is not.
+assert.eq( 1, t.find( { 'a.c': { $lt:4 } } ).hint( {'a.b':1,'a.c':1} ).itcount() );
+assert.eq( 1, t.find( { 'a.c': { $lt:4 } } ).hint( {a:1,'a.c':1} ).itcount() );
+
+assert.eq( 4, t.find( { 'a.c': { $lt:4 } } ).hint( {'a.b':1,'a.c':1} ).explain().indexBounds['a.c'][0][1] );
+assert.eq( 4, t.find( { 'a.c': { $lt:4 } } ).hint( {a:1,'a.c':1} ).explain().indexBounds['a.c'][0][1] );
diff --git a/jstests/indexs.js b/jstests/indexs.js
new file mode 100644
index 0000000..609f912
--- /dev/null
+++ b/jstests/indexs.js
@@ -0,0 +1,21 @@
+// Test index key generation issue with parent and nested fields in same index and array containing subobject SERVER-3005.
+
+t = db.jstests_indexs;
+
+t.drop();
+t.ensureIndex( {a:1} );
+t.save( { a: [ { b: 3 } ] } );
+assert.eq( 1, t.count( { a:{ b:3 } } ) );
+
+t.drop();
+t.ensureIndex( {a:1,'a.b':1} );
+t.save( { a: { b: 3 } } );
+assert.eq( 1, t.count( { a:{ b:3 } } ) );
+ib = t.find( { a:{ b:3 } } ).explain().indexBounds;
+
+t.drop();
+t.ensureIndex( {a:1,'a.b':1} );
+t.save( { a: [ { b: 3 } ] } );
+assert.eq( ib, t.find( { a:{ b:3 } } ).explain().indexBounds );
+assert.eq( 1, t.find( { a:{ b:3 } } ).explain().nscanned );
+assert.eq( 1, t.count( { a:{ b:3 } } ) );
diff --git a/jstests/indext.js b/jstests/indext.js
new file mode 100644
index 0000000..e418dc2
--- /dev/null
+++ b/jstests/indext.js
@@ -0,0 +1,21 @@
+// Sparse indexes with arrays SERVER-3216
+
+t = db.jstests_indext;
+t.drop();
+
+t.ensureIndex( {'a.b':1}, {sparse:true} );
+t.save( {a:[]} );
+t.save( {a:1} );
+assert.eq( 0, t.find().hint( {'a.b':1} ).itcount() );
+assert.eq( 0, t.find().hint( {'a.b':1} ).explain().nscanned );
+
+t.ensureIndex( {'a.b':1,'a.c':1}, {sparse:true} );
+t.save( {a:[]} );
+t.save( {a:1} );
+assert.eq( 0, t.find().hint( {'a.b':1,'a.c':1} ).itcount() );
+assert.eq( 0, t.find().hint( {'a.b':1,'a.c':1} ).explain().nscanned );
+
+t.save( {a:[{b:1}]} );
+t.save( {a:1} );
+assert.eq( 1, t.find().hint( {'a.b':1,'a.c':1} ).itcount() );
+assert.eq( 1, t.find().hint( {'a.b':1,'a.c':1} ).explain().nscanned );
diff --git a/jstests/indexu.js b/jstests/indexu.js
new file mode 100644
index 0000000..c7fa8ed
--- /dev/null
+++ b/jstests/indexu.js
@@ -0,0 +1,137 @@
+// Test index key generation with duplicate values addressed by array index and
+// object field. SERVER-2902
+
+t = db.jstests_indexu;
+t.drop();
+
+var dupDoc = {a:[{'0':1}]}; // There are two 'a.0' fields in this doc.
+var dupDoc2 = {a:[{'1':1},'c']};
+var noDupDoc = {a:[{'1':1}]};
+
+// Test that we can't index dupDoc.
+t.save( dupDoc );
+assert( !db.getLastError() );
+t.ensureIndex( {'a.0':1} );
+assert( db.getLastError() );
+
+t.remove();
+t.ensureIndex( {'a.0':1} );
+assert( !db.getLastError() );
+t.save( dupDoc );
+assert( db.getLastError() );
+
+// Test that we can't index dupDoc2.
+t.drop();
+t.save( dupDoc2 );
+assert( !db.getLastError() );
+t.ensureIndex( {'a.1':1} );
+assert( db.getLastError() );
+
+t.remove();
+t.ensureIndex( {'a.1':1} );
+assert( !db.getLastError() );
+t.save( dupDoc2 );
+assert( db.getLastError() );
+
+// Test that we can index dupDoc with a different index.
+t.drop();
+t.ensureIndex( {'a.b':1} );
+t.save( dupDoc );
+assert( !db.getLastError() );
+
+// Test number field starting with hyphen.
+t.drop();
+t.ensureIndex( {'a.-1':1} );
+t.save( {a:[{'-1':1}]} );
+assert( !db.getLastError() );
+
+// Test number field starting with zero.
+t.drop();
+t.ensureIndex( {'a.00':1} );
+t.save( {a:[{'00':1}]} );
+assert( !db.getLastError() );
+
+// Test multiple array indexes
+t.drop();
+t.ensureIndex( {'a.0':1,'a.1':1} );
+t.save( {a:[{'1':1}]} );
+assert( !db.getLastError() );
+t.save( {a:[{'1':1},4]} );
+assert( db.getLastError() );
+
+// Test that we can index noDupDoc.
+t.drop();
+t.save( noDupDoc );
+t.ensureIndex( {'a.0':1} );
+assert( !db.getLastError() );
+t.ensureIndex( {'a.1':1} );
+assert( !db.getLastError() );
+
+t.drop();
+t.ensureIndex( {'a.0':1} );
+t.ensureIndex( {'a.1':1} );
+t.save( noDupDoc );
+assert( !db.getLastError() );
+
+// Test that we can query noDupDoc.
+assert.eq( 1, t.find( {'a.1':1} ).hint( {'a.1':1} ).itcount() );
+assert.eq( 1, t.find( {'a.1':1} ).hint( {$natural:1} ).itcount() );
+assert.eq( 1, t.find( {'a.0':{'1':1}} ).hint( {'a.0':1} ).itcount() );
+assert.eq( 1, t.find( {'a.0':{'1':1}} ).hint( {$natural:1} ).itcount() );
+
+// Check multiple nested array fields.
+t.drop();
+t.save( {a:[[1]]} );
+t.ensureIndex( {'a.0.0':1} );
+assert( !db.getLastError() );
+assert.eq( 1, t.find( {'a.0.0':1} ).hint( {$natural:1} ).itcount() );
+assert.eq( 1, t.find( {'a.0.0':1} ).hint( {'a.0.0':1} ).itcount() );
+
+// Check where there is a duplicate for a partially addressed field but not for a fully addressed field.
+t.drop();
+t.save( {a:[[1],{'0':1}]} );
+t.ensureIndex( {'a.0.0':1} );
+assert( db.getLastError() );
+
+// Check where there is a duplicate for a fully addressed field.
+t.drop();
+t.save( {a:[[1],{'0':[1]}]} );
+assert( !db.getLastError() );
+t.ensureIndex( {'a.0.0':1} );
+assert( db.getLastError() );
+
+// Two ways of addressing parse to an array.
+t.drop();
+t.save( {a:[{'0':1}]} );
+t.ensureIndex( {'a.0.0':1} );
+assert( db.getLastError() );
+
+// Test several key depths - with same arrays being found.
+t.drop();
+t.save( {a:[{'0':[{'0':1}]}]} );
+t.ensureIndex( {'a.0.0.0.0.0.0':1} );
+assert( db.getLastError() );
+t.ensureIndex( {'a.0.0.0.0.0':1} );
+assert( db.getLastError() );
+t.ensureIndex( {'a.0.0.0.0':1} );
+assert( db.getLastError() );
+t.ensureIndex( {'a.0.0.0':1} );
+assert( db.getLastError() );
+t.ensureIndex( {'a.0.0':1} );
+assert( db.getLastError() );
+t.ensureIndex( {'a.0':1} );
+assert( db.getLastError() );
+t.ensureIndex( {'a':1} );
+assert( !db.getLastError() );
+
+// Two prefixes extract docs, but one terminates extraction before array.
+t.drop();
+t.save( {a:[{'0':{'c':[]}}]} );
+t.ensureIndex( {'a.0.c':1} );
+assert( db.getLastError() );
+
+t.drop();
+t.save( {a:[[{'b':1}]]} );
+assert.eq( 1, t.find( {'a.0.b':1} ).itcount() );
+t.ensureIndex( {'a.0.b':1} );
+assert.eq( 1, t.find( {'a.0.b':1} ).itcount() );
diff --git a/jstests/indexv.js b/jstests/indexv.js
new file mode 100644
index 0000000..a69ff2a
--- /dev/null
+++ b/jstests/indexv.js
@@ -0,0 +1,18 @@
+// Check null key generation.
+
+t = db.jstests_indexv;
+t.drop();
+
+t.ensureIndex( {'a.b':1} );
+
+t.save( {a:[{},{b:1}]} );
+var e = t.find( {'a.b':null} ).explain();
+assert.eq( 0, e.n );
+assert.eq( 1, e.nscanned );
+
+t.drop();
+t.ensureIndex( {'a.b.c':1} );
+t.save( {a:[{b:[]},{b:{c:1}}]} );
+var e = t.find( {'a.b.c':null} ).explain();
+assert.eq( 0, e.n );
+assert.eq( 1, e.nscanned );
diff --git a/jstests/indexw.js b/jstests/indexw.js
new file mode 100644
index 0000000..3264434
--- /dev/null
+++ b/jstests/indexw.js
@@ -0,0 +1,14 @@
+// Check that v0 keys are generated for v0 indexes SERVER-3375
+
+t = db.jstests_indexw;
+t.drop();
+
+t.save( {a:[]} );
+assert.eq( 1, t.count( {a:[]} ) );
+t.ensureIndex( {a:1} );
+assert.eq( 1, t.count( {a:[]} ) );
+t.dropIndexes();
+
+// The count result is incorrect - just checking here that v0 key generation is used.
+t.ensureIndex( {a:1}, {v:0} );
+assert.eq( 0, t.count( {a:[]} ) );
diff --git a/jstests/insert1.js b/jstests/insert1.js
index 76edca1..7e6b73b 100644
--- a/jstests/insert1.js
+++ b/jstests/insert1.js
@@ -39,3 +39,6 @@ assert.eq(id1, id2, "ids match 4");
assert.eq(o, {a:4, _id:id1}, "input unchanged 4");
assert.eq(t.findOne({_id:id1}).a, 4, "find by id 4");
assert.eq(t.findOne({a:4})._id, id1 , "find by val 4");
+
+var stats = db.runCommand({ collstats: "insert1" });
+assert(stats.paddingFactor == 1.0);
diff --git a/jstests/libs/geo_near_random.js b/jstests/libs/geo_near_random.js
index 8624ef2..adf4f86 100644
--- a/jstests/libs/geo_near_random.js
+++ b/jstests/libs/geo_near_random.js
@@ -11,25 +11,46 @@ GeoNearRandomTest = function(name) {
}
-GeoNearRandomTest.prototype.mkPt = function mkPt(scale){
- scale = scale || 1; // scale is good for staying away from edges
- return [((Random.rand() * 359.8) - 179.9) * scale, ((Random.rand() * 180) - 90) * scale];
+GeoNearRandomTest.prototype.mkPt = function mkPt(scale, indexBounds){
+ if(!indexBounds){
+ scale = scale || 1; // scale is good for staying away from edges
+ return [((Random.rand() * 359.8) - 179.9) * scale, ((Random.rand() * 180) - 90) * scale];
+ }
+ else{
+ var range = indexBounds.max - indexBounds.min;
+ var eps = Math.pow(2, -40);
+ // Go very close to the borders but not quite there.
+ return [( Random.rand() * (range - eps) + eps) + indexBounds.min, ( Random.rand() * (range - eps) + eps ) + indexBounds.min];
+ }
+
}
-GeoNearRandomTest.prototype.insertPts = function(nPts) {
+GeoNearRandomTest.prototype.insertPts = function(nPts, indexBounds) {
assert.eq(this.nPts, 0, "insertPoints already called");
this.nPts = nPts;
for (var i=0; i<nPts; i++){
- this.t.insert({_id: i, loc: this.mkPt()});
+ this.t.insert({_id: i, loc: this.mkPt(undefined, indexBounds)});
}
-
- this.t.ensureIndex({loc: '2d'});
+
+ if(!indexBounds)
+ this.t.ensureIndex({loc: '2d'});
+ else
+ this.t.ensureIndex({loc: '2d'}, indexBounds)
}
GeoNearRandomTest.prototype.assertIsPrefix = function(short, long) {
for (var i=0; i < short.length; i++){
- assert.eq(short[i], long[i]);
+
+ var xS = short[i].obj ? short[i].obj.loc[0] : short[i].loc[0]
+ var yS = short[i].obj ? short[i].obj.loc[1] : short[i].loc[1]
+ var dS = short[i].obj ? short[i].dis : 1
+
+ var xL = long[i].obj ? long[i].obj.loc[0] : long[i].loc[0]
+ var yL = long[i].obj ? long[i].obj.loc[1] : long[i].loc[1]
+ var dL = long[i].obj ? long[i].dis : 1
+
+ assert.eq([xS, yS, dS], [xL, yL, dL]);
}
}
diff --git a/jstests/replsets/key1 b/jstests/libs/key1
index b5c19e4..b5c19e4 100644
--- a/jstests/replsets/key1
+++ b/jstests/libs/key1
diff --git a/jstests/replsets/key2 b/jstests/libs/key2
index cbde821..cbde821 100644
--- a/jstests/replsets/key2
+++ b/jstests/libs/key2
diff --git a/jstests/libs/testconfig b/jstests/libs/testconfig
new file mode 100644
index 0000000..0c1fc87
--- /dev/null
+++ b/jstests/libs/testconfig
@@ -0,0 +1,4 @@
+fastsync = true
+#comment line
+#commentedflagwithan = false
+version = false
diff --git a/jstests/mr_errorhandling.js b/jstests/mr_errorhandling.js
index c4e1137..f872b68 100644
--- a/jstests/mr_errorhandling.js
+++ b/jstests/mr_errorhandling.js
@@ -47,3 +47,5 @@ assert.eq( { 1 : 1 , 2 : 2 , 3 : 2 , 4 : 1 } , res.convertToSingleObject() , "A"
res.drop()
assert.throws( function(){ t.mapReduce( m_good , r , { out : "xxx" , query : "foo" } ); } )
+
+printjson( t.mapReduce( function(){ emit( 1 , db.foo.findOne() ); } , r , { out : { inline : true } } ) )
diff --git a/jstests/mr_merge2.js b/jstests/mr_merge2.js
new file mode 100644
index 0000000..520bbfd
--- /dev/null
+++ b/jstests/mr_merge2.js
@@ -0,0 +1,37 @@
+
+t = db.mr_merge2;
+t.drop();
+
+t.insert( { a : [ 1 , 2 ] } )
+t.insert( { a : [ 2 , 3 ] } )
+t.insert( { a : [ 3 , 4 ] } )
+
+outName = "mr_merge2_out";
+out = db[outName];
+out.drop();
+
+m = function(){ for (i=0; i<this.a.length; i++ ) emit( this.a[i] , 1 ); }
+r = function(k,vs){ return Array.sum( vs ); }
+
+function tos( o ){
+ var s = "";
+ for ( var i=0; i<100; i++ ){
+ if ( o[i] )
+ s += i + "_" + o[i] + "|";
+ }
+ return s;
+}
+
+
+outOptions = { out : { merge : outName } }
+
+res = t.mapReduce( m , r , outOptions )
+expected = { "1" : 1 , "2" : 2 , "3" : 2 , "4" : 1 }
+assert.eq( tos( expected ) , tos( res.convertToSingleObject() ) , "A" );
+
+t.insert( { a : [ 4 , 5 ] } )
+res = t.mapReduce( m , r , outOptions )
+expected["4"]++;
+expected["5"] = 1
+assert.eq( tos( expected ) , tos( res.convertToSingleObject() ) , "B" );
+
diff --git a/jstests/numberint.js b/jstests/numberint.js
new file mode 100644
index 0000000..258450f
--- /dev/null
+++ b/jstests/numberint.js
@@ -0,0 +1,92 @@
+assert.eq.automsg( "0", "new NumberInt()" );
+
+n = new NumberInt( 4 );
+assert.eq.automsg( "4", "n" );
+assert.eq.automsg( "4", "n.toNumber()" );
+assert.eq.automsg( "8", "n + 4" );
+assert.eq.automsg( "'NumberInt(4)'", "n.toString()" );
+assert.eq.automsg( "'NumberInt(4)'", "tojson( n )" );
+a = {}
+a.a = n;
+p = tojson( a );
+assert.eq.automsg( "'{ \"a\" : NumberInt(4) }'", "p" );
+
+assert.eq.automsg( "NumberInt(4 )", "eval( tojson( NumberInt( 4 ) ) )" );
+assert.eq.automsg( "a", "eval( tojson( a ) )" );
+
+n = new NumberInt( -4 );
+assert.eq.automsg( "-4", "n" );
+assert.eq.automsg( "-4", "n.toNumber()" );
+assert.eq.automsg( "0", "n + 4" );
+assert.eq.automsg( "'NumberInt(-4)'", "n.toString()" );
+assert.eq.automsg( "'NumberInt(-4)'", "tojson( n )" );
+a = {}
+a.a = n;
+p = tojson( a );
+assert.eq.automsg( "'{ \"a\" : NumberInt(-4) }'", "p" );
+
+n = new NumberInt( "11111" );
+assert.eq.automsg( "'NumberInt(11111)'", "n.toString()" );
+assert.eq.automsg( "'NumberInt(11111)'", "tojson( n )" );
+a = {}
+a.a = n;
+p = tojson( a );
+assert.eq.automsg( "'{ \"a\" : NumberInt(11111) }'", "p" );
+
+assert.eq.automsg( "NumberInt('11111' )", "eval( tojson( NumberInt( '11111' ) ) )" );
+assert.eq.automsg( "a", "eval( tojson( a ) )" );
+
+n = new NumberInt( "-11111" );
+assert.eq.automsg( "-11111", "n.toNumber()" );
+assert.eq.automsg( "-11107", "n + 4" );
+assert.eq.automsg( "'NumberInt(-11111)'", "n.toString()" );
+assert.eq.automsg( "'NumberInt(-11111)'", "tojson( n )" );
+a = {}
+a.a = n;
+p = tojson( a );
+assert.eq.automsg( "'{ \"a\" : NumberInt(-11111) }'", "p" );
+
+// parsing: v8 evaluates not numbers to 0 which is not bad
+//assert.throws.automsg( function() { new NumberInt( "" ); } );
+//assert.throws.automsg( function() { new NumberInt( "y" ); } );
+
+// eq
+
+assert.eq( { x : 5 } , { x : new NumberInt( "5" ) } );
+
+assert( 5 == NumberInt( 5 ) , "eq" );
+assert( 5 < NumberInt( 6 ) , "lt" );
+assert( 5 > NumberInt( 4 ) , "lt" );
+assert( NumberInt( 1 ) , "to bool a" );
+
+// objects are always considered thruthy
+//assert( ! NumberInt( 0 ) , "to bool b" );
+
+// create doc with int value in db
+t = db.getCollection( "numberint" );
+t.drop();
+
+o = { a : NumberInt(42) };
+t.save( o );
+
+assert.eq( 42 , t.findOne().a , "save doc 1" );
+assert.eq( 1 , t.find({a: {$type: 16}}).count() , "save doc 2" );
+assert.eq( 0 , t.find({a: {$type: 1}}).count() , "save doc 3" );
+
+// roundtripping
+mod = t.findOne({a: 42});
+mod.a += 10;
+mod.b = "foo";
+delete mod._id;
+t.save(mod);
+assert.eq( 2 , t.find({a: {$type: 16}}).count() , "roundtrip 1" );
+assert.eq( 0 , t.find({a: {$type: 1}}).count() , "roundtrip 2" );
+assert.eq( 1 , t.find({a: 52}).count() , "roundtrip 3" );
+
+// save regular number
+t.save({a: 42});
+assert.eq( 2 , t.find({a: {$type: 16}}).count() , "normal 1" );
+assert.eq( 1 , t.find({a: {$type: 1}}).count() , "normal 2" );
+assert.eq( 2 , t.find({a: 42}).count() , "normal 3" );
+
+
diff --git a/jstests/numberlong2.js b/jstests/numberlong2.js
new file mode 100644
index 0000000..2540d2d
--- /dev/null
+++ b/jstests/numberlong2.js
@@ -0,0 +1,32 @@
+// Test precision of NumberLong values with v1 index code SERVER-3717
+
+if ( 1 ) { // SERVER-3717
+
+t = db.jstests_numberlong2;
+t.drop();
+
+t.ensureIndex( {x:1} );
+
+function chk(longNum) {
+ t.remove();
+ t.save({ x: longNum });
+ assert.eq(longNum, t.find().hint({ x: 1 }).next().x);
+ assert.eq(longNum, t.find({}, { _id: 0, x: 1 }).hint({ x: 1 }).next().x);
+}
+
+chk( NumberLong("1123539983311657217") );
+chk(NumberLong("-1123539983311657217"));
+ chk(NumberLong("4503599627370495"));
+ chk(NumberLong("4503599627370496"));
+ chk(NumberLong("4503599627370497"));
+
+t.remove();
+
+s = "11235399833116571";
+for( i = 99; i >= 0; --i ) {
+ t.save( {x:NumberLong( s + i )} );
+}
+
+assert.eq( t.find().sort( {x:1} ).hint( {$natural:1} ).toArray(), t.find().sort( {x:1} ).hint( {x:1} ).toArray() );
+
+} \ No newline at end of file
diff --git a/jstests/numberlong3.js b/jstests/numberlong3.js
new file mode 100644
index 0000000..10036c0
--- /dev/null
+++ b/jstests/numberlong3.js
@@ -0,0 +1,25 @@
+// Test sorting with long longs and doubles - SERVER-3719
+
+t = db.jstests_numberlong3;
+t.drop();
+
+s = "11235399833116571";
+for( i = 10; i >= 0; --i ) {
+ n = NumberLong( s + i );
+ t.save( {x:n} );
+ if ( 0 ) { // SERVER-3719
+ t.save( {x:n.floatApprox} );
+ }
+}
+
+ret = t.find().sort({x:1}).toArray().filter( function( x ) { return typeof( x.x.floatApprox ) != 'undefined' } );
+
+//printjson( ret );
+
+for( i = 1; i < ret.length; ++i ) {
+ first = ret[i-1].x.toString();
+ second = ret[i].x.toString();
+ if ( first.length == second.length ) {
+ assert.lte( ret[i-1].x.toString(), ret[i].x.toString() );
+ }
+}
diff --git a/jstests/or1.js b/jstests/or1.js
index 66162c4..66bbd2e 100644
--- a/jstests/or1.js
+++ b/jstests/or1.js
@@ -7,7 +7,7 @@ checkArrs = function( a, b, m ) {
bStr = [];
a.forEach( function( x ) { aStr.push( tojson( x ) ); } );
b.forEach( function( x ) { bStr.push( tojson( x ) ); } );
- for ( i in aStr ) {
+ for ( i = 0; i < aStr.length; ++i ) {
assert( -1 != bStr.indexOf( aStr[ i ] ), m );
}
}
diff --git a/jstests/or2.js b/jstests/or2.js
index d90cc85..297542e 100644
--- a/jstests/or2.js
+++ b/jstests/or2.js
@@ -7,7 +7,7 @@ checkArrs = function( a, b, m ) {
bStr = [];
a.forEach( function( x ) { aStr.push( tojson( x ) ); } );
b.forEach( function( x ) { bStr.push( tojson( x ) ); } );
- for ( i in aStr ) {
+ for ( i = 0; i < aStr.length; ++i ) {
assert( -1 != bStr.indexOf( aStr[ i ] ), m );
}
}
@@ -29,7 +29,6 @@ doTest = function( index ) {
assert.throws( function() { t.find( { x:0,$or:"a" } ).toArray(); } );
assert.throws( function() { t.find( { x:0,$or:[] } ).toArray(); } );
assert.throws( function() { t.find( { x:0,$or:[ "a" ] } ).toArray(); } );
- assert.throws( function() { t.find( { x:0,$or:[ {x:0,$or:[{x:0}]} ] } ).toArray(); } );
a1 = t.find( { x:0, $or: [ { a : 1 } ] } ).toArray();
checkArrs( [ { _id:0, x:0, a:1 } ], a1 );
diff --git a/jstests/or3.js b/jstests/or3.js
index be85a8f..97028be 100644
--- a/jstests/or3.js
+++ b/jstests/or3.js
@@ -7,7 +7,7 @@ checkArrs = function( a, b, m ) {
bStr = [];
a.forEach( function( x ) { aStr.push( tojson( x ) ); } );
b.forEach( function( x ) { bStr.push( tojson( x ) ); } );
- for ( i in aStr ) {
+ for ( i = 0; i < aStr.length; ++i ) {
assert( -1 != bStr.indexOf( aStr[ i ] ), m );
}
}
@@ -29,8 +29,6 @@ doTest = function( index ) {
assert.throws( function() { t.find( { x:0,$nor:"a" } ).toArray(); } );
assert.throws( function() { t.find( { x:0,$nor:[] } ).toArray(); } );
assert.throws( function() { t.find( { x:0,$nor:[ "a" ] } ).toArray(); } );
- assert.throws( function() { t.find( { x:0,$nor:[ {x:0,$or:[{x:0}]} ] } ).toArray(); } );
- assert.throws( function() { t.find( { x:0,$nor:[ {x:0,$nor:[{x:0}]} ] } ).toArray(); } );
an1 = t.find( { $nor: [ { a : 1 } ] } ).toArray();
checkArrs( t.find( {a:{$ne:1}} ).toArray(), an1 );
diff --git a/jstests/or4.js b/jstests/or4.js
index f793f36..3bfe191 100644
--- a/jstests/or4.js
+++ b/jstests/or4.js
@@ -17,7 +17,7 @@ checkArrs = function( a, b ) {
bStr = [];
a.forEach( function( x ) { aStr.push( tojson( x ) ); } );
b.forEach( function( x ) { bStr.push( tojson( x ) ); } );
- for ( i in aStr ) {
+ for ( i = 0; i < aStr.length; ++i ) {
assert( -1 != bStr.indexOf( aStr[ i ] ), m );
}
}
diff --git a/jstests/ord.js b/jstests/ord.js
index 4612f21..f78e504 100644
--- a/jstests/ord.js
+++ b/jstests/ord.js
@@ -28,6 +28,7 @@ for( i = 0; i < 90; ++i ) {
// the index key {a:1}
t.dropIndex( {a:1} );
+db.getLastError();
// Dropping an index kills all cursors on the indexed namespace, not just those
// cursors using the dropped index.
diff --git a/jstests/org.js b/jstests/org.js
new file mode 100644
index 0000000..0833798
--- /dev/null
+++ b/jstests/org.js
@@ -0,0 +1,19 @@
+// SERVER-2282 $or de duping with sparse indexes
+
+t = db.jstests_org;
+t.drop();
+
+t.ensureIndex( {a:1}, {sparse:true} );
+t.ensureIndex( {b:1} );
+
+t.remove();
+t.save( {a:1,b:2} );
+assert.eq( 1, t.count( {$or:[{a:1},{b:2}]} ) );
+
+t.remove();
+t.save( {a:null,b:2} );
+assert.eq( 1, t.count( {$or:[{a:null},{b:2}]} ) );
+
+t.remove();
+t.save( {b:2} );
+assert.eq( 1, t.count( {$or:[{a:null},{b:2}]} ) );
diff --git a/jstests/orh.js b/jstests/orh.js
new file mode 100644
index 0000000..35f6a5b
--- /dev/null
+++ b/jstests/orh.js
@@ -0,0 +1,17 @@
+// SERVER-2831 Demonstration of sparse index matching semantics in a multi index $or query.
+
+t = db.jstests_orh;
+t.drop();
+
+t.ensureIndex( {a:1}, {sparse:true} );
+t.ensureIndex( {b:1,a:1} );
+
+t.remove();
+t.save( {b:2} );
+assert.eq( 0, t.count( {a:null} ) );
+assert.eq( 1, t.count( {b:2,a:null} ) );
+
+assert.eq( 1, t.count( {$or:[{b:2,a:null},{a:null}]} ) );
+
+// Is this desired?
+assert.eq( 0, t.count( {$or:[{a:null},{b:2,a:null}]} ) );
diff --git a/jstests/ori.js b/jstests/ori.js
new file mode 100644
index 0000000..9d923d6
--- /dev/null
+++ b/jstests/ori.js
@@ -0,0 +1,48 @@
+// Check elimination of proper range type when popping a $or clause SERVER-958.
+
+t = db.jstests_ori;
+t.drop();
+
+t.ensureIndex( {a:1,b:1} );
+t.ensureIndex( {a:1,c:1} );
+
+t.save( {a:1,b:[2,3],c:4} );
+t.save( {a:10,b:2,c:4} );
+
+// Check that proper results are returned.
+
+assert.eq( 2, t.count( {$or:[{a:{$gt:0,$lt:5},b:2},{a:10,c:4}]} ) );
+// Two $or clauses expected to be scanned.
+assert.eq( 2, t.find( {$or:[{a:{$gt:0,$lt:5},b:2},{a:10,c:4}]} ).explain().clauses.length );
+assert.eq( 2, t.count( {$or:[{a:10,b:2},{a:{$gt:0,$lt:5},c:4}]} ) );
+
+t.drop();
+
+// Now try a different index order.
+
+t.ensureIndex( {b:1,a:1} );
+t.ensureIndex( {a:1,c:1} );
+
+t.save( {a:1,b:[2,3],c:4} );
+t.save( {a:10,b:2,c:4} );
+
+assert.eq( 2, t.count( {$or:[{a:{$gt:0,$lt:5},b:2},{a:10,c:4}]} ) );
+assert.eq( 2, t.count( {$or:[{a:10,b:2},{a:{$gt:0,$lt:5},c:4}]} ) );
+
+t.drop();
+
+// Now eliminate a range.
+
+t.ensureIndex( {a:1} );
+t.ensureIndex( {b:1} );
+
+t.save( {a:[1,2],b:1} );
+t.save( {a:10,b:1} );
+
+assert.eq( 2, t.count( {$or:[{a:{$gt:0,$lt:5}},{a:10,b:1}]} ) );
+// Because a:1 is multikey, the value a:10 is scanned with the first clause.
+assert.isnull( t.find( {$or:[{a:{$gt:0,$lt:5}},{a:10,b:1}]} ).explain().clauses );
+
+assert.eq( 2, t.count( {$or:[{a:{$lt:5,$gt:0}},{a:10,b:1}]} ) );
+// Now a:10 is not scanned in the first clause so the second clause is not eliminated.
+assert.eq( 2, t.find( {$or:[{a:{$lt:5,$gt:0}},{a:10,b:1}]} ).explain().clauses.length );
diff --git a/jstests/orj.js b/jstests/orj.js
new file mode 100644
index 0000000..fa234f3
--- /dev/null
+++ b/jstests/orj.js
@@ -0,0 +1,121 @@
+// Test nested $or clauses SERVER-2585 SERVER-3192
+
+t = db.jstests_orj;
+t.drop();
+
+t.save( {a:1,b:2} );
+
+function check() {
+
+assert.throws( function() { t.find( { x:0,$or:"a" } ).toArray(); } );
+assert.throws( function() { t.find( { x:0,$or:[] } ).toArray(); } );
+assert.throws( function() { t.find( { x:0,$or:[ "a" ] } ).toArray(); } );
+
+assert.throws( function() { t.find( { x:0,$or:[{$or:"a"}] } ).toArray(); } );
+assert.throws( function() { t.find( { x:0,$or:[{$or:[]}] } ).toArray(); } );
+assert.throws( function() { t.find( { x:0,$or:[{$or:[ "a" ]}] } ).toArray(); } );
+
+assert.throws( function() { t.find( { x:0,$nor:"a" } ).toArray(); } );
+assert.throws( function() { t.find( { x:0,$nor:[] } ).toArray(); } );
+assert.throws( function() { t.find( { x:0,$nor:[ "a" ] } ).toArray(); } );
+
+assert.throws( function() { t.find( { x:0,$nor:[{$nor:"a"}] } ).toArray(); } );
+assert.throws( function() { t.find( { x:0,$nor:[{$nor:[]}] } ).toArray(); } );
+assert.throws( function() { t.find( { x:0,$nor:[{$nor:[ "a" ]}] } ).toArray(); } );
+
+assert.eq( 1, t.find( {a:1,b:2} ).itcount() );
+
+assert.eq( 1, t.find( {a:1,$or:[{b:2}]} ).itcount() );
+assert.eq( 0, t.find( {a:1,$or:[{b:3}]} ).itcount() );
+
+assert.eq( 1, t.find( {a:1,$or:[{$or:[{b:2}]}]} ).itcount() );
+assert.eq( 1, t.find( {a:1,$or:[{$or:[{b:2}]}]} ).itcount() );
+assert.eq( 0, t.find( {a:1,$or:[{$or:[{b:3}]}]} ).itcount() );
+
+assert.eq( 1, t.find( {$or:[{$or:[{a:2},{b:2}]}]} ).itcount() );
+assert.eq( 1, t.find( {$or:[{a:2},{$or:[{b:2}]}]} ).itcount() );
+assert.eq( 1, t.find( {$or:[{a:1},{$or:[{b:3}]}]} ).itcount() );
+
+assert.eq( 1, t.find( {$or:[{$or:[{a:1},{a:2}]},{$or:[{b:3},{b:4}]}]} ).itcount() );
+assert.eq( 1, t.find( {$or:[{$or:[{a:0},{a:2}]},{$or:[{b:2},{b:4}]}]} ).itcount() );
+assert.eq( 0, t.find( {$or:[{$or:[{a:0},{a:2}]},{$or:[{b:3},{b:4}]}]} ).itcount() );
+
+assert.eq( 1, t.find( {a:1,$and:[{$or:[{$or:[{b:2}]}]}]} ).itcount() );
+assert.eq( 0, t.find( {a:1,$and:[{$or:[{$or:[{b:3}]}]}]} ).itcount() );
+
+assert.eq( 1, t.find( {$and:[{$or:[{a:1},{a:2}]},{$or:[{b:1},{b:2}]}]} ).itcount() );
+assert.eq( 0, t.find( {$and:[{$or:[{a:3},{a:2}]},{$or:[{b:1},{b:2}]}]} ).itcount() );
+assert.eq( 0, t.find( {$and:[{$or:[{a:1},{a:2}]},{$or:[{b:3},{b:1}]}]} ).itcount() );
+
+assert.eq( 0, t.find( {$and:[{$nor:[{a:1},{a:2}]},{$nor:[{b:1},{b:2}]}]} ).itcount() );
+assert.eq( 0, t.find( {$and:[{$nor:[{a:3},{a:2}]},{$nor:[{b:1},{b:2}]}]} ).itcount() );
+assert.eq( 1, t.find( {$and:[{$nor:[{a:3},{a:2}]},{$nor:[{b:3},{b:1}]}]} ).itcount() );
+
+assert.eq( 1, t.find( {$and:[{$or:[{a:1},{a:2}]},{$nor:[{b:1},{b:3}]}]} ).itcount() );
+assert.eq( 0, t.find( {$and:[{$or:[{a:3},{a:2}]},{$nor:[{b:1},{b:3}]}]} ).itcount() );
+assert.eq( 0, t.find( {$and:[{$or:[{a:1},{a:2}]},{$nor:[{b:1},{b:2}]}]} ).itcount() );
+
+}
+
+check();
+
+t.ensureIndex( {a:1} );
+check();
+t.dropIndexes();
+
+t.ensureIndex( {b:1} );
+check();
+t.dropIndexes();
+
+t.ensureIndex( {a:1} );
+t.ensureIndex( {b:1} );
+check();
+t.dropIndexes();
+
+t.ensureIndex( {a:1,b:1} );
+check();
+t.dropIndexes();
+
+t.ensureIndex( {a:1} );
+t.ensureIndex( {b:1} );
+t.ensureIndex( {a:1,b:1} );
+check();
+
+function checkHinted( hint ) {
+ assert.eq( 1, t.find( {a:1,b:2} ).hint( hint ).itcount() );
+
+ assert.eq( 1, t.find( {a:1,$or:[{b:2}]} ).hint( hint ).itcount() );
+ assert.eq( 0, t.find( {a:1,$or:[{b:3}]} ).hint( hint ).itcount() );
+
+ assert.eq( 1, t.find( {a:1,$or:[{$or:[{b:2}]}]} ).hint( hint ).itcount() );
+ assert.eq( 1, t.find( {a:1,$or:[{$or:[{b:2}]}]} ).hint( hint ).itcount() );
+ assert.eq( 0, t.find( {a:1,$or:[{$or:[{b:3}]}]} ).hint( hint ).itcount() );
+
+ assert.eq( 1, t.find( {$or:[{$or:[{a:2},{b:2}]}]} ).hint( hint ).itcount() );
+ assert.eq( 1, t.find( {$or:[{a:2},{$or:[{b:2}]}]} ).hint( hint ).itcount() );
+ assert.eq( 1, t.find( {$or:[{a:1},{$or:[{b:3}]}]} ).hint( hint ).itcount() );
+
+ assert.eq( 1, t.find( {$or:[{$or:[{a:1},{a:2}]},{$or:[{b:3},{b:4}]}]} ).hint( hint ).itcount() );
+ assert.eq( 1, t.find( {$or:[{$or:[{a:0},{a:2}]},{$or:[{b:2},{b:4}]}]} ).hint( hint ).itcount() );
+ assert.eq( 0, t.find( {$or:[{$or:[{a:0},{a:2}]},{$or:[{b:3},{b:4}]}]} ).hint( hint ).itcount() );
+
+ assert.eq( 1, t.find( {a:1,$and:[{$or:[{$or:[{b:2}]}]}]} ).hint( hint ).itcount() );
+ assert.eq( 0, t.find( {a:1,$and:[{$or:[{$or:[{b:3}]}]}]} ).hint( hint ).itcount() );
+
+ assert.eq( 1, t.find( {$and:[{$or:[{a:1},{a:2}]},{$or:[{b:1},{b:2}]}]} ).hint( hint ).itcount() );
+ assert.eq( 0, t.find( {$and:[{$or:[{a:3},{a:2}]},{$or:[{b:1},{b:2}]}]} ).hint( hint ).itcount() );
+ assert.eq( 0, t.find( {$and:[{$or:[{a:1},{a:2}]},{$or:[{b:3},{b:1}]}]} ).hint( hint ).itcount() );
+
+ assert.eq( 0, t.find( {$and:[{$nor:[{a:1},{a:2}]},{$nor:[{b:1},{b:2}]}]} ).hint( hint ).itcount() );
+ assert.eq( 0, t.find( {$and:[{$nor:[{a:3},{a:2}]},{$nor:[{b:1},{b:2}]}]} ).hint( hint ).itcount() );
+ assert.eq( 1, t.find( {$and:[{$nor:[{a:3},{a:2}]},{$nor:[{b:3},{b:1}]}]} ).hint( hint ).itcount() );
+
+ assert.eq( 1, t.find( {$and:[{$or:[{a:1},{a:2}]},{$nor:[{b:1},{b:3}]}]} ).hint( hint ).itcount() );
+ assert.eq( 0, t.find( {$and:[{$or:[{a:3},{a:2}]},{$nor:[{b:1},{b:3}]}]} ).hint( hint ).itcount() );
+ assert.eq( 0, t.find( {$and:[{$or:[{a:1},{a:2}]},{$nor:[{b:1},{b:2}]}]} ).hint( hint ).itcount() );
+}
+
+checkHinted( {$natural:1} );
+checkHinted( {a:1} );
+checkHinted( {b:1} );
+checkHinted( {a:1,b:1} ); \ No newline at end of file
diff --git a/jstests/ork.js b/jstests/ork.js
new file mode 100644
index 0000000..d6d4016
--- /dev/null
+++ b/jstests/ork.js
@@ -0,0 +1,11 @@
+// SERVER-2585 Test $or clauses within indexed top level $or clauses.
+
+t = db.jstests_ork;
+t.drop();
+
+t.ensureIndex( {a:1} );
+t.save( {a:[1,2],b:5} );
+t.save( {a:[2,4],b:5} );
+
+assert.eq( 2, t.find( {$or:[{a:1,$and:[{$or:[{a:2},{a:3}]},{$or:[{b:5}]}]},{a:2,$or:[{a:3},{a:4}]}]} ).itcount() );
+assert.eq( 1, t.find( {$or:[{a:1,$and:[{$or:[{a:2},{a:3}]},{$or:[{b:6}]}]},{a:2,$or:[{a:3},{a:4}]}]} ).itcount() );
diff --git a/jstests/orl.js b/jstests/orl.js
new file mode 100644
index 0000000..2726975
--- /dev/null
+++ b/jstests/orl.js
@@ -0,0 +1,13 @@
+// SERVER-3445 Test using coarse multikey bounds for or range elimination.
+
+t = db.jstests_orl;
+t.drop();
+
+t.ensureIndex( {'a.b':1,'a.c':1} );
+// make the index multikey
+t.save( {a:{b:[1,2]}} );
+
+// SERVER-3445
+if ( 0 ) {
+assert( !t.find( {$or:[{'a.b':2,'a.c':3},{'a.b':2,'a.c':4}]} ).explain().clauses );
+} \ No newline at end of file
diff --git a/jstests/orm.js b/jstests/orm.js
new file mode 100644
index 0000000..dae75e4
--- /dev/null
+++ b/jstests/orm.js
@@ -0,0 +1,29 @@
+// Test dropping during a $or yield SERVER-3555
+
+t = db.jstests_orm;
+t.drop();
+
+clauses = [];
+for( i = 0; i < 10; ++i ) {
+ clauses.push( {a:{$lte:(i+1)*5000/10},i:49999} );
+ clauses.push( {b:{$lte:(i+1)*5000/10},i:49999} );
+}
+
+p = startParallelShell( 'for( i = 0; i < 15; ++i ) { sleep( 1000 ); db.jstests_orm.drop() }' );
+for( j = 0; j < 5; ++j ) {
+ for( i = 0; i < 5000; ++i ) {
+ t.save( {a:i,i:i} );
+ t.save( {b:i,i:i} );
+ }
+ t.ensureIndex( {a:1} );
+ t.ensureIndex( {b:1} );
+ try {
+ t.find( {$or:clauses} ).itcount();
+ t.find( {$or:clauses} ).count();
+ t.update( {$or:clauses}, {} );
+ t.remove( {$or:clauses} );
+ } catch ( e ) {
+ }
+ db.getLastError();
+}
+p();
diff --git a/jstests/orn.js b/jstests/orn.js
new file mode 100644
index 0000000..c900bb8
--- /dev/null
+++ b/jstests/orn.js
@@ -0,0 +1,22 @@
+// Test dropping during an $or distinct yield SERVER-3555
+
+t = db.jstests_orn;
+t.drop();
+
+clauses = [];
+for( i = 0; i < 10; ++i ) {
+ clauses.push( {a:{$lte:(i+1)*5000/10},i:49999} );
+ clauses.push( {b:{$lte:(i+1)*5000/10},i:49999} );
+}
+
+p = startParallelShell( 'for( i = 0; i < 15; ++i ) { sleep( 1000 ); db.jstests_orn.drop() }' );
+for( j = 0; j < 5; ++j ) {
+ for( i = 0; i < 5000; ++i ) {
+ t.save( {a:i,i:i} );
+ t.save( {b:i,i:i} );
+ }
+ t.ensureIndex( {a:1} );
+ t.ensureIndex( {b:1} );
+ t.distinct('a',{$or:clauses});
+}
+p();
diff --git a/jstests/profile1.js b/jstests/profile1.js
index 0e8009a..9654357 100644
--- a/jstests/profile1.js
+++ b/jstests/profile1.js
@@ -1,49 +1,125 @@
+print("profile1.js BEGIN");
try {
-/* With pre-created system.profile (capped) */
-db.runCommand({profile: 0});
-db.getCollection("system.profile").drop();
-assert(!db.getLastError(), "Z");
-assert.eq(0, db.runCommand({profile: -1}).was, "A");
+ function getProfileAString() {
+ var s = "\n";
+ db.system.profile.find().forEach( function(z){
+ s += tojson( z ) + " ,\n" ;
+ } );
+ return s;
+ }
-db.createCollection("system.profile", {capped: true, size: 1000});
-db.runCommand({profile: 2});
-assert.eq(2, db.runCommand({profile: -1}).was, "B");
-assert.eq(1, db.system.profile.stats().capped, "C");
-var capped_size = db.system.profile.storageSize();
-assert.gt(capped_size, 999, "D");
-assert.lt(capped_size, 2000, "E");
+ /* With pre-created system.profile (capped) */
+ db.runCommand({profile: 0});
+ db.getCollection("system.profile").drop();
+ assert(!db.getLastError(), "Z");
+ assert.eq(0, db.runCommand({profile: -1}).was, "A");
+
+ db.createCollection("system.profile", {capped: true, size: 10000});
+ db.runCommand({profile: 2});
+ assert.eq(2, db.runCommand({profile: -1}).was, "B");
+ assert.eq(1, db.system.profile.stats().capped, "C");
+ var capped_size = db.system.profile.storageSize();
+ assert.gt(capped_size, 9999, "D");
+ assert.lt(capped_size, 20000, "E");
+
+ db.foo.findOne()
+
+ assert.eq( 4 , db.system.profile.find().count() , "E2" );
+
+ /* Make sure we can't drop if profiling is still on */
+ assert.throws( function(z){ db.getCollection("system.profile").drop(); } )
-db.foo.findOne()
+ /* With pre-created system.profile (un-capped) */
+ db.runCommand({profile: 0});
+ db.getCollection("system.profile").drop();
+ assert.eq(0, db.runCommand({profile: -1}).was, "F");
+
+ db.createCollection("system.profile");
+ db.runCommand({profile: 2});
+ assert.eq(2, db.runCommand({profile: -1}).was, "G");
+ assert.eq(null, db.system.profile.stats().capped, "G1");
+
+ /* With no system.profile collection */
+ db.runCommand({profile: 0});
+ db.getCollection("system.profile").drop();
+ assert.eq(0, db.runCommand({profile: -1}).was, "H");
+
+ db.runCommand({profile: 2});
+ assert.eq(2, db.runCommand({profile: -1}).was, "I");
+ assert.eq(1, db.system.profile.stats().capped, "J");
+ var auto_size = db.system.profile.storageSize();
+ assert.gt(auto_size, capped_size, "K");
+
-assert.eq( 4 , db.system.profile.find().count() , "E2" );
+ db.eval("sleep(1)") // pre-load system.js
-/* Make sure we can't drop if profiling is still on */
-assert.throws( function(z){ db.getCollection("system.profile").drop(); } )
+ db.setProfilingLevel(2);
+ before = db.system.profile.count();
+ db.eval( "sleep(25)" )
+ db.eval( "sleep(120)" )
+ after = db.system.profile.count()
+ assert.eq( before + 3 , after , "X1" )
-/* With pre-created system.profile (un-capped) */
-db.runCommand({profile: 0});
-db.getCollection("system.profile").drop();
-assert.eq(0, db.runCommand({profile: -1}).was, "F");
+ /* sleep() could be inaccurate on certain platforms. let's check */
+ print("\nsleep 2 time actual:");
+ for (var i = 0; i < 4; i++) {
+ print(db.eval("var x = new Date(); sleep(2); return new Date() - x;"));
+ }
+ print();
+ print("\nsleep 20 times actual:");
+ for (var i = 0; i < 4; i++) {
+ print(db.eval("var x = new Date(); sleep(20); return new Date() - x;"));
+ }
+ print();
+ print("\nsleep 120 times actual:");
+ for (var i = 0; i < 4; i++) {
+ print(db.eval("var x = new Date(); sleep(120); return new Date() - x;"));
+ }
+ print();
-db.createCollection("system.profile");
-db.runCommand({profile: 2});
-assert.eq(2, db.runCommand({profile: -1}).was, "G");
-assert.eq(null, db.system.profile.stats().capped, "G1");
+ function evalSleepMoreThan(millis,max){
+ var start = new Date();
+ db.eval("sleep("+millis+")");
+ var end = new Date();
+ var actual = end.getTime() - start.getTime();
+ if ( actual > ( millis + 5 ) ) {
+ print( "warning wanted to sleep for: " + millis + " but took: " + actual );
+ }
+ return actual >= max ? 1 : 0;
+ }
-/* With no system.profile collection */
-db.runCommand({profile: 0});
-db.getCollection("system.profile").drop();
-assert.eq(0, db.runCommand({profile: -1}).was, "H");
+ db.setProfilingLevel(1,100);
+ before = db.system.profile.count();
+ var delta = 0;
+ delta += evalSleepMoreThan( 15 , 100 );
+ delta += evalSleepMoreThan( 120 , 100 );
+ after = db.system.profile.count()
+ assert.eq( before + delta , after , "X2 : " + getProfileAString() )
-db.runCommand({profile: 2});
-assert.eq(2, db.runCommand({profile: -1}).was, "I");
-assert.eq(1, db.system.profile.stats().capped, "J");
-var auto_size = db.system.profile.storageSize();
-assert.gt(auto_size, capped_size, "K");
+ db.setProfilingLevel(1,20);
+ before = db.system.profile.count();
+ delta = 0;
+ delta += evalSleepMoreThan( 5 , 20 );
+ delta += evalSleepMoreThan( 120 , 20 );
+ after = db.system.profile.count()
+ assert.eq( before + delta , after , "X3 : " + getProfileAString() )
+
+ db.profile.drop();
+ db.setProfilingLevel(2)
+ var q = { _id : 5 };
+ var u = { $inc : { x : 1 } };
+ db.profile1.update( q , u );
+ var r = db.system.profile.find().sort( { $natural : -1 } )[0]
+ assert.eq( q , r.query , "Y1" );
+ assert.eq( u , r.updateobj , "Y2" );
+ assert.eq( "update" , r.op , "Y3" );
+ assert.eq("test.profile1", r.ns, "Y4");
+ print("profile1.js SUCCESS OK");
+
} finally {
// disable profiling for subsequent tests
assert.commandWorked( db.runCommand( {profile:0} ) );
-} \ No newline at end of file
+}
diff --git a/jstests/profile2.js b/jstests/profile2.js
new file mode 100644
index 0000000..929b463
--- /dev/null
+++ b/jstests/profile2.js
@@ -0,0 +1,19 @@
+print("profile2.js BEGIN");
+
+try {
+
+ assert.commandWorked( db.runCommand( {profile:2} ) );
+
+ huge = 'huge';
+ while (huge.length < 2*1024*1024){
+ huge += huge;
+ }
+
+ db.profile2.count({huge:huge}) // would make a huge entry in db.system.profile
+
+ print("profile2.js SUCCESS OK");
+
+} finally {
+ // disable profiling for subsequent tests
+ assert.commandWorked( db.runCommand( {profile:0} ) );
+}
diff --git a/jstests/profile3.js b/jstests/profile3.js
new file mode 100644
index 0000000..a6574b7
--- /dev/null
+++ b/jstests/profile3.js
@@ -0,0 +1,26 @@
+
+t = db.profile3;
+t.drop();
+
+try {
+ db.setProfilingLevel(0);
+
+ db.system.profile.drop();
+ assert.eq( 0 , db.system.profile.count() )
+
+ db.setProfilingLevel(2);
+
+ t.insert( { x : 1 } );
+ t.findOne( { x : 1 } );
+ t.find( { x : 1 } ).count();
+
+ db.system.profile.find().forEach( printjson )
+
+ db.setProfilingLevel(0);
+ db.system.profile.drop();
+
+}
+finally {
+ db.setProfilingLevel(0);
+}
+
diff --git a/jstests/push.js b/jstests/push.js
index 2cdd91c..9bcaa2f 100644
--- a/jstests/push.js
+++ b/jstests/push.js
@@ -17,6 +17,38 @@ assert.eq( "2" , t.findOne().a.toString() , "D" );
t.update( { _id : 2 } , { $push : { a : 3 } } );
t.update( { _id : 2 } , { $push : { a : 4 } } );
t.update( { _id : 2 } , { $push : { a : 5 } } );
-assert.eq( "2,3,4,5" , t.findOne().a.toString() , "D" );
+assert.eq( "2,3,4,5" , t.findOne().a.toString() , "E1" );
+
+t.update( { _id : 2 } , { $pop : { a : -1 } } );
+assert.eq( "3,4,5" , t.findOne().a.toString() , "E2" );
+
+t.update( { _id : 2 } , { $pop : { a : -1 } } );
+assert.eq( "4,5" , t.findOne().a.toString() , "E3" );
+
t.update( { _id : 2 } , { $pop : { a : -1 } } );
-assert.eq( "3,4,5" , t.findOne().a.toString() , "D" );
+assert.isnull( db.getLastError() , "E4a" )
+assert.eq( "5" , t.findOne().a.toString() , "E4" );
+
+
+t.update( { _id : 2 } , { $pop : { a : -1 } } );
+assert.isnull( db.getLastError() , "E5a")
+assert.eq( "" , t.findOne().a.toString() , "E5" );
+
+t.update( { _id : 2 } , { $pop : { a : -1 } } );
+assert.isnull( db.getLastError() , "E6a" )
+assert.eq( "" , t.findOne().a.toString() , "E6" );
+
+t.update( { _id : 2 } , { $pop : { a : -1 } } );
+assert.isnull( db.getLastError() , "E7a" )
+assert.eq( "" , t.findOne().a.toString() , "E7" );
+
+t.update( { _id : 2 } , { $pop : { a : 1 } } );
+assert.isnull( db.getLastError() , "E8a" )
+assert.eq( "" , t.findOne().a.toString() , "E8" );
+
+t.update( { _id : 2 } , { $pop : { b : -1 } } );
+assert.isnull( db.getLastError() , "E4a" )
+
+t.update( { _id : 2 } , { $pop : { b : 1 } } );
+assert.isnull( db.getLastError() , "E4a" )
+
diff --git a/jstests/query1.js b/jstests/query1.js
index 9b40054..c3e276f 100644
--- a/jstests/query1.js
+++ b/jstests/query1.js
@@ -18,3 +18,6 @@ t.find().forEach(
assert.eq( num , 3 , "num" )
assert.eq( total , 8 , "total" )
+
+assert.eq( 3 , t.find()._addSpecial( "$comment" , "this is a test" ).itcount() , "B1" )
+assert.eq( 3 , t.find()._addSpecial( "$comment" , "this is a test" ).count() , "B2" )
diff --git a/jstests/regex2.js b/jstests/regex2.js
index b6a21f5..87d5cb4 100644
--- a/jstests/regex2.js
+++ b/jstests/regex2.js
@@ -60,3 +60,11 @@ assert.eq( 1 , t.find( { a : {$regex: a} } ).count() , "obj C D" );
assert.eq( 1 , t.find( { a : {$regex: b} } ).count() , "obj C E" );
assert.eq( 2 , t.find( { a : {$regex: a , $options: "i" } } ).count() , "obj C F is spidermonkey built with UTF-8 support?" );
+// Test s (DOT_ALL) option. Not supported with /regex/opts syntax
+t.drop();
+t.save({a:'1 2'})
+t.save({a:'1\n2'})
+assert.eq( 1 , t.find( { a : {$regex: '1.*2'} } ).count() );
+assert.eq( 2 , t.find( { a : {$regex: '1.*2', $options: 's'} } ).count() );
+
+
diff --git a/jstests/regex6.js b/jstests/regex6.js
index 8243313..5414324 100644
--- a/jstests/regex6.js
+++ b/jstests/regex6.js
@@ -6,6 +6,7 @@ t.save( { name : "eliot" } );
t.save( { name : "emily" } );
t.save( { name : "bob" } );
t.save( { name : "aaron" } );
+t.save( { name : "[with]some?symbols" } );
t.ensureIndex( { name : 1 } );
@@ -14,9 +15,15 @@ assert.eq( 1 , t.find( { name : /^\// } ).explain().nscanned , "index explain 1"
assert.eq( 0 , t.find( { name : /^é/ } ).explain().nscanned , "index explain 2" );
assert.eq( 0 , t.find( { name : /^\é/ } ).explain().nscanned , "index explain 3" );
assert.eq( 1 , t.find( { name : /^\./ } ).explain().nscanned , "index explain 4" );
-assert.eq( 4 , t.find( { name : /^./ } ).explain().nscanned , "index explain 5" );
+assert.eq( 5 , t.find( { name : /^./ } ).explain().nscanned , "index explain 5" );
-assert.eq( 4 , t.find( { name : /^\Qblah\E/ } ).explain().nscanned , "index explain 6" );
+// SERVER-2862
+assert.eq( 0 , t.find( { name : /^\Qblah\E/ } ).count() , "index explain 6" );
+assert.eq( 1 , t.find( { name : /^\Qblah\E/ } ).explain().nscanned , "index explain 6" );
+assert.eq( 1 , t.find( { name : /^blah/ } ).explain().nscanned , "index explain 6" );
+assert.eq( 1 , t.find( { name : /^\Q[\Ewi\Qth]some?s\Eym/ } ).count() , "index explain 6" );
+assert.eq( 2 , t.find( { name : /^\Q[\Ewi\Qth]some?s\Eym/ } ).explain().nscanned , "index explain 6" );
+assert.eq( 2 , t.find( { name : /^bob/ } ).explain().nscanned , "index explain 6" ); // proof nscanned == count+1
assert.eq( 1, t.find( { name : { $regex : "^e", $gte: "emily" } } ).explain().nscanned , "ie7" );
assert.eq( 1, t.find( { name : { $gt : "a", $regex: "^emily" } } ).explain().nscanned , "ie7" );
diff --git a/jstests/regexa.js b/jstests/regexa.js
new file mode 100644
index 0000000..b0d4719
--- /dev/null
+++ b/jstests/regexa.js
@@ -0,0 +1,19 @@
+// Test simple regex optimization with a regex | (bar) present - SERVER-3298
+
+t = db.jstests_regexa;
+t.drop();
+
+function check() {
+ assert.eq( 1, t.count( {a:/^(z|.)/} ) );
+ assert.eq( 1, t.count( {a:/^z|./} ) );
+ assert.eq( 0, t.count( {a:/^z(z|.)/} ) );
+ assert.eq( 1, t.count( {a:/^zz|./} ) );
+}
+
+t.save( {a:'a'} );
+
+check();
+t.ensureIndex( {a:1} );
+if ( 1 ) { // SERVER-3298
+check();
+}
diff --git a/jstests/remove10.js b/jstests/remove10.js
new file mode 100644
index 0000000..cf1dac4
--- /dev/null
+++ b/jstests/remove10.js
@@ -0,0 +1,28 @@
+// SERVER-2009 Update documents with adjacent indexed keys.
+// This test doesn't fail, it just prints an invalid warning message.
+
+if ( 0 ) { // SERVER-2009
+t = db.jstests_remove10;
+t.drop();
+t.ensureIndex( {i:1} );
+
+function arr( i ) {
+ ret = [];
+ for( j = i; j < i + 11; ++j ) {
+ ret.push( j );
+ }
+ return ret;
+}
+
+for( i = 0; i < 1100; i += 11 ) {
+ t.save( {i:arr( i )} );
+}
+
+s = startParallelShell( 't = db.jstests_remove10; for( j = 0; j < 1000; ++j ) { o = t.findOne( {i:Random.randInt(1100)} ); t.remove( {_id:o._id} ); t.insert( o ); }' );
+
+for( i = 0; i < 200; ++i ) {
+ t.find( {i:{$gte:0}} ).hint( {i:1} ).itcount();
+}
+
+s();
+} \ No newline at end of file
diff --git a/jstests/remove2.js b/jstests/remove2.js
index ff122a0..eb4ef07 100644
--- a/jstests/remove2.js
+++ b/jstests/remove2.js
@@ -21,6 +21,11 @@ function g() {
t.save( { x:[7,8,9], z:"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" } );
t.remove( {x : {$gte:3}, $atomic:x++ } );
+
+ assert( !db.getLastError() );
+ // $atomic within $and is not allowed.
+ t.remove( {x : {$gte:3}, $and:[{$atomic:true}] } );
+ assert( db.getLastError() );
assert( t.findOne({x:3}) == null );
assert( t.findOne({x:8}) == null );
diff --git a/jstests/remove9.js b/jstests/remove9.js
new file mode 100644
index 0000000..655594a
--- /dev/null
+++ b/jstests/remove9.js
@@ -0,0 +1,16 @@
+// SERVER-2009 Count odd numbered entries while updating and deleting even numbered entries.
+
+t = db.jstests_remove9;
+t.drop();
+t.ensureIndex( {i:1} );
+for( i = 0; i < 1000; ++i ) {
+ t.save( {i:i} );
+}
+
+s = startParallelShell( 't = db.jstests_remove9; for( j = 0; j < 5000; ++j ) { i = Random.randInt( 499 ) * 2; t.update( {i:i}, {$set:{i:2000}} ); t.remove( {i:2000} ); t.save( {i:i} ); }' );
+
+for( i = 0; i < 1000; ++i ) {
+ assert.eq( 500, t.find( {i:{$gte:0,$mod:[2,1]}} ).hint( {i:1} ).itcount() );
+}
+
+s();
diff --git a/jstests/rename.js b/jstests/rename.js
index 3ace968..d475cc6 100644
--- a/jstests/rename.js
+++ b/jstests/rename.js
@@ -31,17 +31,24 @@ a.drop();
b.drop();
c.drop();
-db.createCollection( "jstests_rename_a", {capped:true,size:100} );
-for( i = 0; i < 10; ++i ) {
+// TODO: too many numbers hard coded here
+// this test depends precisely on record size and hence may not be very reliable
+// note we use floats to make sure numbers are represented as doubles for both SM and v8, since test relies on record size
+db.createCollection( "jstests_rename_a", {capped:true,size:10000} );
+for( i = 0.1; i < 10; ++i ) {
a.save( { i: i } );
}
assert.commandWorked( admin.runCommand( {renameCollection:"test.jstests_rename_a", to:"test.jstests_rename_b"} ) );
-assert.eq( 1, b.count( {i:9} ) );
-for( i = 10; i < 20; ++i ) {
+assert.eq( 1, b.count( {i:9.1} ) );
+for( i = 10.1; i < 250; ++i ) {
b.save( { i: i } );
}
-assert.eq( 0, b.count( {i:9} ) );
-assert.eq( 1, b.count( {i:19} ) );
+
+//res = b.find().sort({i:1});
+//while (res.hasNext()) printjson(res.next());
+
+assert.eq( 0, b.count( {i:9.1} ) );
+assert.eq( 1, b.count( {i:19.1} ) );
assert( db.system.namespaces.findOne( {name:"test.jstests_rename_b" } ) );
assert( !db.system.namespaces.findOne( {name:"test.jstests_rename_a" } ) );
diff --git a/jstests/repl/basic1.js b/jstests/repl/basic1.js
index 15fc983..4a6091d 100644
--- a/jstests/repl/basic1.js
+++ b/jstests/repl/basic1.js
@@ -60,7 +60,7 @@ r = function( key , v ){
correct = { a : 2 , b : 1 };
function checkMR( t ){
- var res = t.mapReduce( m , r , "basic1_out" );
+ var res = t.mapReduce( m , r , { out : { inline : 1 } } )
assert.eq( correct , res.convertToSingleObject() , "checkMR: " + tojson( t ) );
}
@@ -148,6 +148,23 @@ x = { _id : 1 , x : 1 }
assert.eq( x , am.mu1.findOne() , "mu1" );
assert.soon( function(){ z = as.mu1.findOne(); printjson( z ); return friendlyEqual( x , z ); } , "mu2" )
+// profiling - this sould be last
+
+am.setProfilingLevel( 2 )
+am.foo.insert( { x : 1 } )
+am.foo.findOne()
+block();
+assert.eq( 2 , am.system.profile.count() , "P1" )
+assert.eq( 0 , as.system.profile.count() , "P2" )
+
+assert.eq( 1 , as.foo.findOne().x , "P3" );
+assert.eq( 0 , as.system.profile.count() , "P4" )
+
+assert( as.getCollectionNames().indexOf( "system.profile" ) < 0 , "P4.5" )
+
+as.setProfilingLevel(2)
+as.foo.findOne();
+assert.eq( 1 , as.system.profile.count() , "P5" )
rt.stop();
diff --git a/jstests/repl/dbcase.js b/jstests/repl/dbcase.js
new file mode 100644
index 0000000..10a5a61
--- /dev/null
+++ b/jstests/repl/dbcase.js
@@ -0,0 +1,95 @@
+// Test db case checking with replication SERVER-2111
+
+baseName = "jstests_repl_dbcase";
+
+rt = new ReplTest( baseName );
+
+m = rt.start( true );
+s = rt.start( false );
+
+n1 = "dbname";
+n2 = "dbNAme";
+
+/**
+ * The value of n should be n1 or n2. Check that n is soon present while its
+ * opposite is not present.
+ */
+function check( n ) {
+ assert.soon( function() {
+ try {
+ // Our db name changes may trigger an exception - SERVER-3189.
+ names = s.getDBNames();
+ } catch (e) {
+ return false;
+ }
+ n1Idx = names.indexOf( n1 );
+ n2Idx = names.indexOf( n2 );
+ if ( n1Idx != -1 && n2Idx != -1 ) {
+ // n1 and n2 may both be reported as present transiently.
+ return false;
+ }
+ // Return true if we matched expected n.
+ return -1 != names.indexOf( n );
+ } );
+}
+
+/** Allow some time for additional operations to be processed by the slave. */
+function checkTwice( n ) {
+ check( n );
+ // zzz is expected to be cloned after n1 and n2 because of its position in the alphabet.
+ m.getDB( "zzz" ).c.save( {} );
+ assert.soon( function() { return s.getDB( "zzz" ).c.count(); } )
+ check( n );
+ m.getDB( "zzz" ).dropDatabase();
+}
+
+/**
+ * The slave may create in memory db names on the master matching old dbs it is
+ * attempting to clone. This function forces operation 'cmd' by deleting those
+ * in memory dbs if necessary. This function should only be called in cases where
+ * 'cmd' would succeed if not for the in memory dbs on master created by the slave.
+ */
+function force( cmd ) {
+ print( "cmd: " + cmd );
+ eval( cmd );
+ while( m1.getLastError() ) {
+ sleep( 100 );
+ m1.dropDatabase();
+ m2.dropDatabase();
+ eval( cmd );
+ }
+}
+
+m1 = m.getDB( n1 );
+m2 = m.getDB( n2 );
+
+m1.c.save( {} );
+m2.c.save( {} ); // will fail due to conflict
+check( n1 );
+
+m1.dropDatabase();
+force( "m2.c.save( {} );" ); // will now succeed
+check( n2 );
+
+m2.dropDatabase();
+force( "m1.c.save( {} );" );
+check( n1 );
+
+for( i = 0; i < 5; ++i ) {
+ m1.dropDatabase();
+ force( "m2.c.save( {} );" );
+ m2.dropDatabase();
+ force( "m1.c.save( {} );" );
+}
+checkTwice( n1 );
+
+m1.dropDatabase();
+force( "m2.c.save( {} );" );
+
+for( i = 0; i < 5; ++i ) {
+ m2.dropDatabase();
+ force( "m1.c.save( {} );" );
+ m1.dropDatabase();
+ force( "m2.c.save( {} );" );
+}
+checkTwice( n2 );
diff --git a/jstests/repl/drop_dups.js b/jstests/repl/drop_dups.js
new file mode 100644
index 0000000..100f469
--- /dev/null
+++ b/jstests/repl/drop_dups.js
@@ -0,0 +1,68 @@
+
+var rt = new ReplTest( "drop_dups" );
+
+m = rt.start( true );
+s = rt.start( false );
+
+function block(){
+ am.runCommand( { getlasterror : 1 , w : 2 , wtimeout : 3000 } )
+}
+
+am = m.getDB( "foo" );
+as = s.getDB( "foo" );
+
+function run( createInBackground ) {
+
+ collName = "foo" + ( createInBackground ? "B" : "F" );
+
+ am[collName].drop();
+ am.blah.insert( { x : 1 } )
+ assert.soon( function(){
+ block();
+ return as.blah.findOne();
+ }
+ );
+
+
+ for ( i=0; i<10; i++ ) {
+ am[collName].insert( { _id : i , x : Math.floor( i / 2 ) } )
+ }
+
+ block();
+
+ am.runCommand( { "godinsert" : collName , obj : { _id : 100 , x : 20 } } );
+ am.runCommand( { "godinsert" : collName , obj : { _id : 101 , x : 20 } } );
+
+ as.runCommand( { "godinsert" : collName , obj : { _id : 101 , x : 20 } } );
+ as.runCommand( { "godinsert" : collName , obj : { _id : 100 , x : 20 } } );
+
+ assert.eq( as[collName].count() , am[collName].count() );
+
+ function mymap(z) {
+ return z._id + ":" + z.x + ",";
+ }
+
+
+ if ( am.serverStatus().mem.bits == 64 ) {
+ assert.neq( tojson(am[collName].find().map(mymap)) ,
+ tojson(as[collName].find().map(mymap)) , "order is not supposed to be same on master and slave but it is" );
+ }
+
+
+ am[collName].ensureIndex( { x : 1 } , { unique : true , dropDups : true , background : createInBackground } );
+ am.blah.insert( { x : 1 } )
+ block();
+
+ assert.eq( 2 , am[collName].getIndexKeys().length , "A1 : " + createInBackground )
+ assert.eq( 2 , as[collName].getIndexKeys().length , "A2 : " + createInBackground )
+
+ assert.eq( am[collName].find().sort( { _id : 1 } ).map(mymap) ,
+ as[collName].find().sort( { _id : 1 } ).map(mymap) , "different things dropped on master and slave" );
+
+
+}
+
+run( false )
+run( true )
+
+rt.stop()
diff --git a/jstests/repl/mastermaster1.js b/jstests/repl/mastermaster1.js
index 4932d5a..97fdc14 100644
--- a/jstests/repl/mastermaster1.js
+++ b/jstests/repl/mastermaster1.js
@@ -4,32 +4,45 @@
ports = allocatePorts( 2 )
left = startMongodTest( ports[0] , "mastermaster1left" , false , { master : "" , slave : "" , source : "127.0.0.1:" + ports[1] } )
-right = startMongodTest( ports[1] , "mastermaster1left" , false , { master : "" , slave : "" , source : "127.0.0.1:" + ports[0] } )
-
-print( "check 1" )
x = left.getDB( "admin" ).runCommand( "ismaster" )
assert( x.ismaster , "left: " + tojson( x ) )
+right = startMongodTest( ports[1] , "mastermaster1right" , false , { master : "" , slave : "" , source : "127.0.0.1:" + ports[0] } )
+
x = right.getDB( "admin" ).runCommand( "ismaster" )
assert( x.ismaster , "right: " + tojson( x ) )
+print( "check 1" )
+
+
ldb = left.getDB( "test" )
rdb = right.getDB( "test" )
print( "check 2" )
ldb.foo.insert( { _id : 1 , x : "eliot" } )
-var result = ldb.runCommand( { getlasterror : 1 , w : 2 , wtimeout : 20000 } );
+result = ldb.runCommand( { getlasterror : 1 , w : 2 , wtimeout : 40000 } );
printjson(result);
rdb.foo.insert( { _id : 2 , x : "sara" } )
-result = rdb.runCommand( { getlasterror : 1 , w : 2 , wtimeout : 20000 } )
+result = rdb.runCommand( { getlasterror : 1 , w : 2 , wtimeout : 40000 } )
printjson(result);
print( "check 3" )
+print( "left" )
+ldb.foo.find().forEach( printjsononeline )
+print( "right" )
+rdb.foo.find().forEach( printjsononeline )
+
+print( "oplog" )
+
+rdb.getSisterDB( "local" ).getCollection( "oplog.$main" ).find().forEach( printjsononeline )
+
+/*
assert.eq( 2 , ldb.foo.count() , "B1" )
assert.eq( 2 , rdb.foo.count() , "B2" )
+*/
print( "going to stop everything" )
diff --git a/jstests/repl/mod_move.js b/jstests/repl/mod_move.js
new file mode 100644
index 0000000..d39e747
--- /dev/null
+++ b/jstests/repl/mod_move.js
@@ -0,0 +1,69 @@
+
+// test repl basics
+// data on master/slave is the same
+
+var rt = new ReplTest( "mod_move" );
+
+m = rt.start( true , { oplogSize : 50 } );
+
+function block(){
+ am.runCommand( { getlasterror : 1 , w : 2 , wtimeout : 3000 } )
+}
+
+am = m.getDB( "foo" );
+
+function check( note ){
+ var start = new Date();
+ var x,y;
+ while ( (new Date()).getTime() - start.getTime() < 5 * 60 * 1000 ){
+ x = am.runCommand( "dbhash" );
+ y = as.runCommand( "dbhash" );
+ if ( x.md5 == y.md5 )
+ return;
+ sleep( 200 );
+ }
+ assert.eq( x.md5 , y.md5 , note );
+}
+
+// insert a lot of 'big' docs
+// so when we delete them the small docs move here
+
+BIG = 100000;
+N = BIG * 2;
+
+s : "asdasdasdasdasdasdasdadasdadasdadasdasdas"
+
+for ( i=0; i<BIG; i++ ) {
+ am.a.insert( { _id : i , s : 1 , x : 1 } )
+}
+for ( ; i<N; i++ ) {
+ am.a.insert( { _id : i , s : 1 } )
+}
+for ( i=0; i<BIG; i++ ) {
+ am.a.remove( { _id : i } )
+}
+am.getLastError();
+assert.eq( BIG , am.a.count() )
+
+assert.eq( 1 , am.a.stats().paddingFactor , "A2" )
+
+
+// start slave
+s = rt.start( false );
+as = s.getDB( "foo" );
+for ( i=N-1; i>=BIG; i-- ) {
+ am.a.update( { _id : i } , { $set : { x : 1 } } )
+ if ( i == N ) {
+ am.getLastError()
+ assert.lt( as.a.count() , BIG , "B1" )
+ print( "NOW : " + as.a.count() )
+ }
+}
+
+check( "B" )
+
+rt.stop();
+
+
+
+
diff --git a/jstests/repl/pair1.js b/jstests/repl/pair1.js
deleted file mode 100644
index 84dd7b7..0000000
--- a/jstests/repl/pair1.js
+++ /dev/null
@@ -1,100 +0,0 @@
-// Basic pairing test
-
-var baseName = "jstests_pair1test";
-
-debug = function( p ) {
-// print( p );
-}
-
-ismaster = function( n ) {
- var im = n.getDB( "admin" ).runCommand( { "ismaster" : 1 } );
-// print( "ismaster: " + tojson( im ) );
- assert( im, "command ismaster failed" );
- return im.ismaster;
-}
-
-var writeOneIdx = 0;
-
-writeOne = function( n ) {
- n.getDB( baseName ).z.save( { _id: new ObjectId(), i: ++writeOneIdx } );
-}
-
-getCount = function( n ) {
- return n.getDB( baseName ).z.find( { i: writeOneIdx } ).toArray().length;
-}
-
-checkWrite = function( m, s ) {
- writeOne( m );
- assert.eq( 1, getCount( m ) );
- check( s );
-}
-
-check = function( s ) {
- s.setSlaveOk();
- assert.soon( function() {
- return 1 == getCount( s );
- } );
- sleep( 500 ); // wait for sync clone to finish up
-}
-
-// check that slave reads and writes are guarded
-checkSlaveGuard = function( s ) {
- var t = s.getDB( baseName + "-temp" ).temp;
- assert.throws( t.find().count, [], "not master" );
- assert.throws( t.find(), [], "not master", "find did not assert" );
-
- checkError = function() {
- assert.eq( "not master", s.getDB( "admin" ).getLastError() );
- s.getDB( "admin" ).resetError();
- }
- s.getDB( "admin" ).resetError();
- t.save( {x:1} );
- checkError();
- t.update( {}, {x:2}, true );
- checkError();
- t.remove( {x:0} );
- checkError();
-}
-
-doTest = function( signal ) {
-
- ports = allocatePorts( 3 );
-
- a = new MongodRunner( ports[ 0 ], "/data/db/" + baseName + "-arbiter" );
- l = new MongodRunner( ports[ 1 ], "/data/db/" + baseName + "-left", "127.0.0.1:" + ports[ 2 ], "127.0.0.1:" + ports[ 0 ] );
- r = new MongodRunner( ports[ 2 ], "/data/db/" + baseName + "-right", "127.0.0.1:" + ports[ 1 ], "127.0.0.1:" + ports[ 0 ] );
-
- rp = new ReplPair( l, r, a );
- rp.start();
- rp.waitForSteadyState();
-
- checkSlaveGuard( rp.slave() );
-
- checkWrite( rp.master(), rp.slave() );
-
- debug( "kill first" );
- rp.killNode( rp.master(), signal );
- rp.waitForSteadyState( [ 1, null ], rp.slave().host );
- writeOne( rp.master() );
-
- debug( "restart first" );
- rp.start( true );
- rp.waitForSteadyState();
- check( rp.slave() );
- checkWrite( rp.master(), rp.slave() );
-
- debug( "kill second" );
- rp.killNode( rp.master(), signal );
- rp.waitForSteadyState( [ 1, null ], rp.slave().host );
-
- debug( "restart second" );
- rp.start( true );
- rp.waitForSteadyState( [ 1, 0 ], rp.master().host );
- checkWrite( rp.master(), rp.slave() );
-
- ports.forEach( function( x ) { stopMongod( x ); } );
-
-}
-
-doTest( 15 ); // SIGTERM
-doTest( 9 ); // SIGKILL
diff --git a/jstests/repl/pair2.js b/jstests/repl/pair2.js
deleted file mode 100644
index 2491fb2..0000000
--- a/jstests/repl/pair2.js
+++ /dev/null
@@ -1,71 +0,0 @@
-// Pairing resync
-
-var baseName = "jstests_pair2test";
-
-ismaster = function( n ) {
- im = n.getDB( "admin" ).runCommand( { "ismaster" : 1 } );
- assert( im );
- return im.ismaster;
-}
-
-soonCount = function( m, count ) {
- assert.soon( function() {
-// print( "counting" );
-//// print( "counted: " + l.getDB( baseName ).z.find().count() );
- return m.getDB( baseName ).z.find().count() == count;
- } );
-}
-
-doTest = function( signal ) {
-
- ports = allocatePorts( 3 );
-
- a = new MongodRunner( ports[ 0 ], "/data/db/" + baseName + "-arbiter" );
- l = new MongodRunner( ports[ 1 ], "/data/db/" + baseName + "-left", "127.0.0.1:" + ports[ 2 ], "127.0.0.1:" + ports[ 0 ] );
- r = new MongodRunner( ports[ 2 ], "/data/db/" + baseName + "-right", "127.0.0.1:" + ports[ 1 ], "127.0.0.1:" + ports[ 0 ] );
-
- rp = new ReplPair( l, r, a );
- rp.start();
- rp.waitForSteadyState();
-
- rp.slave().setSlaveOk();
- mz = rp.master().getDB( baseName ).z;
-
- mz.save( { _id: new ObjectId() } );
- soonCount( rp.slave(), 1 );
- assert.eq( 0, rp.slave().getDB( "admin" ).runCommand( { "resync" : 1 } ).ok );
-
- sleep( 3000 ); // allow time to finish clone and save ReplSource
- rp.killNode( rp.slave(), signal );
- rp.waitForSteadyState( [ 1, null ], rp.master().host );
-
- big = new Array( 2000 ).toString();
- for( i = 0; i < 1000; ++i )
- mz.save( { _id: new ObjectId(), i: i, b: big } );
-
- rp.start( true );
- rp.waitForSteadyState( [ 1, 0 ], rp.master().host );
-
- sleep( 15000 );
-
- rp.slave().setSlaveOk();
- assert.soon( function() {
- ret = rp.slave().getDB( "admin" ).runCommand( { "resync" : 1 } );
-// printjson( ret );
- return 1 == ret.ok;
- } );
-
- sleep( 8000 );
- soonCount( rp.slave(), 1001 );
- sz = rp.slave().getDB( baseName ).z
- assert.eq( 1, sz.find( { i: 0 } ).count() );
- assert.eq( 1, sz.find( { i: 999 } ).count() );
-
- assert.eq( 0, rp.slave().getDB( "admin" ).runCommand( { "resync" : 1 } ).ok );
-
- ports.forEach( function( x ) { stopMongod( x ); } );
-
-}
-
-doTest( 15 ); // SIGTERM
-doTest( 9 ); // SIGKILL
diff --git a/jstests/repl/pair3.js b/jstests/repl/pair3.js
deleted file mode 100644
index d5fdf7e..0000000
--- a/jstests/repl/pair3.js
+++ /dev/null
@@ -1,245 +0,0 @@
-// test arbitration
-
-var baseName = "jstests_pair3test";
-
-ismaster = function( n ) {
- var im = n.getDB( "admin" ).runCommand( { "ismaster" : 1 } );
- print( "ismaster: " + tojson( im ) );
- assert( im, "command ismaster failed" );
- return im.ismaster;
-}
-
-// bring up node connections before arbiter connections so that arb can forward to node when expected
-connect = function() {
- if ( lp == null ) {
- print("connecting lp");
- lp = startMongoProgram( "mongobridge", "--port", lpPort, "--dest", "localhost:" + lPort );
- }
- if ( rp == null ) {
- print("connecting rp");
- rp = startMongoProgram( "mongobridge", "--port", rpPort, "--dest", "localhost:" + rPort );
- }
- if ( al == null ) {
- print("connecting al");
- al = startMongoProgram( "mongobridge", "--port", alPort, "--dest", "localhost:" + aPort );
- }
- if ( ar == null ) {
- print("connecting ar");
- ar = startMongoProgram( "mongobridge", "--port", arPort, "--dest", "localhost:" + aPort );
- }
-}
-
-disconnectNode = function( mongo ) {
- if ( lp ) {
- print("disconnecting lp: "+lpPort);
- stopMongoProgram( lpPort );
- lp = null;
- }
- if ( rp ) {
- print("disconnecting rp: "+rpPort);
- stopMongoProgram( rpPort );
- rp = null;
- }
- if ( mongo.host.match( new RegExp( "^127.0.0.1:" + lPort + "$" ) ) ) {
- print("disconnecting al: "+alPort);
- stopMongoProgram( alPort );
- al = null;
- } else if ( mongo.host.match( new RegExp( "^127.0.0.1:" + rPort + "$" ) ) ) {
- print("disconnecting ar: "+arPort);
- stopMongoProgram( arPort );
- ar = null;
- } else {
- assert( false, "don't know how to disconnect node: " + mongo );
- }
-}
-
-doTest1 = function() {
- al = ar = lp = rp = null;
- ports = allocatePorts( 7 );
- aPort = ports[ 0 ];
- alPort = ports[ 1 ];
- arPort = ports[ 2 ];
- lPort = ports[ 3 ];
- lpPort = ports[ 4 ];
- rPort = ports[ 5 ];
- rpPort = ports[ 6 ];
-
- connect();
-
- a = new MongodRunner( aPort, "/data/db/" + baseName + "-arbiter" );
- l = new MongodRunner( lPort, "/data/db/" + baseName + "-left", "127.0.0.1:" + rpPort, "127.0.0.1:" + alPort );
- r = new MongodRunner( rPort, "/data/db/" + baseName + "-right", "127.0.0.1:" + lpPort, "127.0.0.1:" + arPort );
-
- pair = new ReplPair( l, r, a );
-
- print("normal startup");
- pair.start();
- pair.waitForSteadyState();
-
- print("disconnect slave");
- disconnectNode( pair.slave() );
- pair.waitForSteadyState( [ 1, -3 ], pair.master().host );
-
- print("disconnect master");
- disconnectNode( pair.master() );
- pair.waitForSteadyState( [ -3, -3 ] );
-
- print("reconnect");
- connect();
- pair.waitForSteadyState();
-
- print("disconnect master");
- disconnectNode( pair.master() );
- pair.waitForSteadyState( [ 1, -3 ], pair.slave().host, true );
-
- print("disconnect new master");
- disconnectNode( pair.master() );
- pair.waitForSteadyState( [ -3, -3 ] );
-
- print("reconnect");
- connect();
- pair.waitForSteadyState();
-
- print("disconnect slave");
- disconnectNode( pair.slave() );
- pair.waitForSteadyState( [ 1, -3 ], pair.master().host );
-
- print("reconnect slave");
- connect();
- pair.waitForSteadyState( [ 1, 0 ], pair.master().host );
-
- print("disconnect master");
- disconnectNode( pair.master() );
- pair.waitForSteadyState( [ 1, -3 ], pair.slave().host, true );
-
- print("reconnect old master");
- connect();
- pair.waitForSteadyState( [ 1, 0 ], pair.master().host );
-
- ports.forEach( function( x ) { stopMongoProgram( x ); } );
-}
-
-// this time don't start connected
-doTest2 = function() {
- al = ar = lp = rp = null;
- ports = allocatePorts( 7 );
- aPort = ports[ 0 ];
- alPort = ports[ 1 ];
- arPort = ports[ 2 ];
- lPort = ports[ 3 ];
- lpPort = ports[ 4 ];
- rPort = ports[ 5 ];
- rpPort = ports[ 6 ];
-
- a = new MongodRunner( aPort, "/data/db/" + baseName + "-arbiter" );
- l = new MongodRunner( lPort, "/data/db/" + baseName + "-left", "127.0.0.1:" + rpPort, "127.0.0.1:" + alPort );
- r = new MongodRunner( rPort, "/data/db/" + baseName + "-right", "127.0.0.1:" + lpPort, "127.0.0.1:" + arPort );
-
- pair = new ReplPair( l, r, a );
- pair.start();
- pair.waitForSteadyState( [ -3, -3 ] );
-
- startMongoProgram( "mongobridge", "--port", arPort, "--dest", "localhost:" + aPort );
-
- // there hasn't been an initial sync, no no node will become master
-
- for( i = 0; i < 10; ++i ) {
- assert( pair.isMaster( pair.right() ) == -3 && pair.isMaster( pair.left() ) == -3 );
- sleep( 500 );
- }
-
- stopMongoProgram( arPort );
-
- startMongoProgram( "mongobridge", "--port", alPort, "--dest", "localhost:" + aPort );
-
- for( i = 0; i < 10; ++i ) {
- assert( pair.isMaster( pair.right() ) == -3 && pair.isMaster( pair.left() ) == -3 );
- sleep( 500 );
- }
-
- stopMongoProgram( alPort );
-
- // connect l and r without a
-
- startMongoProgram( "mongobridge", "--port", lpPort, "--dest", "localhost:" + lPort );
- startMongoProgram( "mongobridge", "--port", rpPort, "--dest", "localhost:" + rPort );
-
- pair.waitForSteadyState( [ 1, 0 ] );
-
- ports.forEach( function( x ) { stopMongoProgram( x ); } );
-}
-
-// recover from master - master setup
-doTest3 = function() {
- al = ar = lp = rp = null;
- ports = allocatePorts( 7 );
- aPort = ports[ 0 ];
- alPort = ports[ 1 ];
- arPort = ports[ 2 ];
- lPort = ports[ 3 ];
- lpPort = ports[ 4 ];
- rPort = ports[ 5 ];
- rpPort = ports[ 6 ];
-
- connect();
-
- a = new MongodRunner( aPort, "/data/db/" + baseName + "-arbiter" );
- l = new MongodRunner( lPort, "/data/db/" + baseName + "-left", "127.0.0.1:" + rpPort, "127.0.0.1:" + alPort );
- r = new MongodRunner( rPort, "/data/db/" + baseName + "-right", "127.0.0.1:" + lpPort, "127.0.0.1:" + arPort );
-
- pair = new ReplPair( l, r, a );
- pair.start();
- pair.waitForSteadyState();
-
- // now can only talk to arbiter
- stopMongoProgram( lpPort );
- stopMongoProgram( rpPort );
- pair.waitForSteadyState( [ 1, 1 ], null, true );
-
- // recover
- startMongoProgram( "mongobridge", "--port", lpPort, "--dest", "localhost:" + lPort );
- startMongoProgram( "mongobridge", "--port", rpPort, "--dest", "localhost:" + rPort );
- pair.waitForSteadyState( [ 1, 0 ], null, true );
-
- ports.forEach( function( x ) { stopMongoProgram( x ); } );
-}
-
-// check that initial sync is persistent
-doTest4 = function( signal ) {
- al = ar = lp = rp = null;
- ports = allocatePorts( 7 );
- aPort = ports[ 0 ];
- alPort = ports[ 1 ];
- arPort = ports[ 2 ];
- lPort = ports[ 3 ];
- lpPort = ports[ 4 ];
- rPort = ports[ 5 ];
- rpPort = ports[ 6 ];
-
- connect();
-
- a = new MongodRunner( aPort, "/data/db/" + baseName + "-arbiter" );
- l = new MongodRunner( lPort, "/data/db/" + baseName + "-left", "127.0.0.1:" + rpPort, "127.0.0.1:" + alPort );
- r = new MongodRunner( rPort, "/data/db/" + baseName + "-right", "127.0.0.1:" + lpPort, "127.0.0.1:" + arPort );
-
- pair = new ReplPair( l, r, a );
- pair.start();
- pair.waitForSteadyState();
-
- pair.killNode( pair.left(), signal );
- pair.killNode( pair.right(), signal );
- stopMongoProgram( rpPort );
- stopMongoProgram( lpPort );
-
- // now can only talk to arbiter
- pair.start( true );
- pair.waitForSteadyState( [ 1, 1 ], null, true );
-
- ports.forEach( function( x ) { stopMongoProgram( x ); } );
-}
-
-doTest1();
-doTest2();
-doTest3();
-doTest4( 15 );
-doTest4( 9 );
diff --git a/jstests/repl/pair4.js b/jstests/repl/pair4.js
deleted file mode 100644
index c04433e..0000000
--- a/jstests/repl/pair4.js
+++ /dev/null
@@ -1,160 +0,0 @@
-// data consistency after master-master
-
-var baseName = "jstests_pair4test";
-
-debug = function( o ) {
- printjson( o );
-}
-
-ismaster = function( n ) {
- var im = n.getDB( "admin" ).runCommand( { "ismaster" : 1 } );
- print( "ismaster: " + tojson( im ) );
- assert( im, "command ismaster failed" );
- return im.ismaster;
-}
-
-connect = function() {
- startMongoProgram( "mongobridge", "--port", lpPort, "--dest", "localhost:" + lPort );
- startMongoProgram( "mongobridge", "--port", rpPort, "--dest", "localhost:" + rPort );
-}
-
-disconnect = function() {
- stopMongoProgram( lpPort );
- stopMongoProgram( rpPort );
-}
-
-write = function( m, n, id ) {
- if ( id ) {
- save = { _id:id, n:n };
- } else {
- save = { n:n };
- }
- m.getDB( baseName ).getCollection( baseName ).save( save );
-}
-
-check = function( m, n, id ) {
- m.setSlaveOk();
- if ( id ) {
- find = { _id:id, n:n };
- } else {
- find = { n:n };
- }
- assert.soon( function() { return m.getDB( baseName ).getCollection( baseName ).find( find ).count() > 0; },
- "failed waiting for " + m + " value of n to be " + n );
-}
-
-checkCount = function( m, c ) {
- m.setSlaveOk();
- assert.soon( function() {
- actual = m.getDB( baseName ).getCollection( baseName ).find().count();
- print( actual );
- return c == actual; },
- "count failed for " + m );
-}
-
-coll = function( m ) {
- return m.getDB( baseName ).getCollection( baseName );
-}
-
-db2Coll = function( m ) {
- return m.getDB( baseName + "_second" ).getCollection( baseName );
-}
-
-doTest = function( recover, newMaster, newSlave ) {
- ports = allocatePorts( 5 );
- aPort = ports[ 0 ];
- lPort = ports[ 1 ];
- lpPort = ports[ 2 ];
- rPort = ports[ 3 ];
- rpPort = ports[ 4 ];
-
- // start normally
- connect();
- a = new MongodRunner( aPort, "/data/db/" + baseName + "-arbiter" );
- l = new MongodRunner( lPort, "/data/db/" + baseName + "-left", "127.0.0.1:" + rpPort, "127.0.0.1:" + aPort );
- r = new MongodRunner( rPort, "/data/db/" + baseName + "-right", "127.0.0.1:" + lpPort, "127.0.0.1:" + aPort );
- pair = new ReplPair( l, r, a );
- pair.start();
- pair.waitForSteadyState();
-
- firstMaster = pair.master();
- firstSlave = pair.slave();
-
- write( pair.master(), 0 );
- write( pair.master(), 1 );
- check( pair.slave(), 0 );
- check( pair.slave(), 1 );
-
- // now each can only talk to arbiter
- disconnect();
- pair.waitForSteadyState( [ 1, 1 ], null, true );
-
- m = newMaster();
- write( m, 10 );
- write( m, 100, "a" );
- coll( m ).update( {n:1}, {$set:{n:2}} );
- db2Coll( m ).save( {n:500} );
- db2Coll( m ).findOne();
-
- s = newSlave();
- write( s, 20 );
- write( s, 200, "a" );
- coll( s ).update( {n:1}, {n:1,m:3} );
- db2Coll( s ).save( {_id:"a",n:600} );
- db2Coll( s ).findOne();
-
- // recover
- recover();
-
- nodes = [ pair.right(), pair.left() ];
-
- nodes.forEach( function( x ) { checkCount( x, 5 ); } );
- nodes.forEach( function( x ) { [ 0, 10, 20, 100 ].forEach( function( y ) { check( x, y ); } ); } );
-
- checkM = function( c ) {
- assert.soon( function() {
- obj = coll( c ).findOne( {n:2} );
- printjson( obj );
- return obj.m == undefined;
- }, "n:2 test for " + c + " failed" );
- };
- nodes.forEach( function( x ) { checkM( x ); } );
-
- // check separate database
- nodes.forEach( function( x ) { assert.soon( function() {
- r = db2Coll( x ).findOne( {_id:"a"} );
- debug( r );
- if ( r == null ) {
- return false;
- }
- return 600 == r.n;
- } ) } );
-
- ports.forEach( function( x ) { stopMongoProgram( x ); } );
-
-}
-
-// right will be master on recovery b/c both sides will have completed initial sync
-debug( "basic test" );
-doTest( function() {
- connect();
- pair.waitForSteadyState( [ 1, 0 ], pair.right().host, true );
- }, function() { return pair.right(); }, function() { return pair.left(); } );
-
-doRestartTest = function( signal ) {
- doTest( function() {
- if ( signal == 9 ) {
- sleep( 3000 );
- }
- pair.killNode( firstMaster, signal );
- connect();
- pair.start( true );
- pair.waitForSteadyState( [ 1, 0 ], firstSlave.host, true );
- }, function() { return firstSlave; }, function() { return firstMaster; } );
-}
-
-debug( "sigterm restart test" );
-doRestartTest( 15 ) // SIGTERM
-
-debug( "sigkill restart test" );
-doRestartTest( 9 ) // SIGKILL
diff --git a/jstests/repl/pair5.js b/jstests/repl/pair5.js
deleted file mode 100644
index de7e2d5..0000000
--- a/jstests/repl/pair5.js
+++ /dev/null
@@ -1,95 +0,0 @@
-// writes to new master while making master-master logs consistent
-
-var baseName = "jstests_pair5test";
-
-debug = function( p ) {
- print( p );
-}
-
-ismaster = function( n ) {
- var im = n.getDB( "admin" ).runCommand( { "ismaster" : 1 } );
- print( "ismaster: " + tojson( im ) );
- assert( im, "command ismaster failed" );
- return im.ismaster;
-}
-
-connect = function() {
- startMongoProgram( "mongobridge", "--port", lpPort, "--dest", "localhost:" + lPort );
- startMongoProgram( "mongobridge", "--port", rpPort, "--dest", "localhost:" + rPort );
-}
-
-disconnect = function() {
- stopMongoProgram( lpPort );
- stopMongoProgram( rpPort );
-}
-
-write = function( m, n, id ) {
- if ( id ) {
- save = { _id:id, n:n };
- } else {
- save = { n:n };
- }
- m.getDB( baseName ).getCollection( baseName ).save( save );
-}
-
-checkCount = function( m, c ) {
- m.setSlaveOk();
- assert.soon( function() {
- actual = m.getDB( baseName ).getCollection( baseName ).find().count();
- print( actual );
- return c == actual; },
- "count failed for " + m );
-}
-
-doTest = function( nSlave, opIdMem ) {
- ports = allocatePorts( 5 );
- aPort = ports[ 0 ];
- lPort = ports[ 1 ];
- lpPort = ports[ 2 ];
- rPort = ports[ 3 ];
- rpPort = ports[ 4 ];
-
- // start normally
- connect();
- a = new MongodRunner( aPort, "/data/db/" + baseName + "-arbiter" );
- l = new MongodRunner( lPort, "/data/db/" + baseName + "-left", "127.0.0.1:" + rpPort, "127.0.0.1:" + aPort );
- r = new MongodRunner( rPort, "/data/db/" + baseName + "-right", "127.0.0.1:" + lpPort, "127.0.0.1:" + aPort );
- pair = new ReplPair( l, r, a );
- pair.start();
- pair.waitForSteadyState();
-
- // now each can only talk to arbiter
- disconnect();
- pair.waitForSteadyState( [ 1, 1 ], null, true );
-
- // left will become slave (b/c both completed initial sync)
- for( i = 0; i < nSlave; ++i ) {
- write( pair.left(), i, i );
- }
- pair.left().getDB( baseName ).getCollection( baseName ).findOne();
-
- for( i = 10000; i < 15000; ++i ) {
- write( pair.right(), i, i );
- }
- pair.right().getDB( baseName ).getCollection( baseName ).findOne();
-
- connect();
- pair.waitForSteadyState( [ 1, 0 ], pair.right().host, true );
-
- pair.master().getDB( baseName ).getCollection( baseName ).update( {_id:nSlave - 1}, {_id:nSlave - 1,n:-1}, true );
- assert.eq( -1, pair.master().getDB( baseName ).getCollection( baseName ).findOne( {_id:nSlave - 1} ).n );
- checkCount( pair.master(), 5000 + nSlave );
- assert.eq( -1, pair.master().getDB( baseName ).getCollection( baseName ).findOne( {_id:nSlave - 1} ).n );
- pair.slave().setSlaveOk();
- assert.soon( function() {
- n = pair.slave().getDB( baseName ).getCollection( baseName ).findOne( {_id:nSlave - 1} ).n;
- print( n );
- return -1 == n;
- } );
-
- ports.forEach( function( x ) { stopMongoProgram( x ); } );
-
-}
-
-doTest( 5000, 100000000 );
-doTest( 5000, 100 ); // force op id converstion to collection based storage
diff --git a/jstests/repl/pair6.js b/jstests/repl/pair6.js
deleted file mode 100644
index b249fc0..0000000
--- a/jstests/repl/pair6.js
+++ /dev/null
@@ -1,115 +0,0 @@
-// pairing cases where oplogs run out of space
-
-var baseName = "jstests_pair6test";
-
-debug = function( p ) {
- print( p );
-}
-
-ismaster = function( n ) {
- var im = n.getDB( "admin" ).runCommand( { "ismaster" : 1 } );
- print( "ismaster: " + tojson( im ) );
- assert( im, "command ismaster failed" );
- return im.ismaster;
-}
-
-connect = function() {
- startMongoProgram( "mongobridge", "--port", lpPort, "--dest", "localhost:" + lPort );
- startMongoProgram( "mongobridge", "--port", rpPort, "--dest", "localhost:" + rPort );
-}
-
-disconnect = function() {
- stopMongoProgram( lpPort );
- stopMongoProgram( rpPort );
-}
-
-checkCount = function( m, c ) {
- m.setSlaveOk();
- assert.soon( function() {
- actual = m.getDB( baseName ).getCollection( baseName ).find().count();
- print( actual );
- return c == actual; },
- "expected count " + c + " for " + m );
-}
-
-resetSlave = function( s ) {
- s.setSlaveOk();
- assert.soon( function() {
- ret = s.getDB( "admin" ).runCommand( { "resync" : 1 } );
- // printjson( ret );
- return 1 == ret.ok;
- } );
-}
-
-big = new Array( 2000 ).toString();
-
-doTest = function() {
- ports = allocatePorts( 5 );
- aPort = ports[ 0 ];
- lPort = ports[ 1 ];
- lpPort = ports[ 2 ];
- rPort = ports[ 3 ];
- rpPort = ports[ 4 ];
-
- // start normally
- connect();
- a = new MongodRunner( aPort, "/data/db/" + baseName + "-arbiter" );
- l = new MongodRunner( lPort, "/data/db/" + baseName + "-left", "127.0.0.1:" + rpPort, "127.0.0.1:" + aPort );
- r = new MongodRunner( rPort, "/data/db/" + baseName + "-right", "127.0.0.1:" + lpPort, "127.0.0.1:" + aPort );
- pair = new ReplPair( l, r, a );
- pair.start();
- pair.waitForSteadyState();
-
- disconnect();
- pair.waitForSteadyState( [ 1, 1 ], null, true );
-
- print( "test one" );
-
- // fill new slave oplog
- for( i = 0; i < 1000; ++i ) {
- pair.left().getDB( baseName ).getCollection( baseName ).save( {b:big} );
- }
- pair.left().getDB( baseName ).getCollection( baseName ).findOne();
-
- // write single to new master
- pair.right().getDB( baseName ).getCollection( baseName ).save( {} );
-
- connect();
- pair.waitForSteadyState( [ 1, 0 ], pair.right().host, true );
-
- resetSlave( pair.left() );
-
- checkCount( pair.left(), 1 );
- checkCount( pair.right(), 1 );
-
- pair.right().getDB( baseName ).getCollection( baseName ).remove( {} );
- checkCount( pair.left(), 0 );
-
- disconnect();
- pair.waitForSteadyState( [ 1, 1 ], null, true );
-
- print( "test two" );
-
- // fill new master oplog
- for( i = 0; i < 1000; ++i ) {
- pair.right().getDB( baseName ).getCollection( baseName ).save( {b:big} );
- }
-
- pair.left().getDB( baseName ).getCollection( baseName ).save( {_id:"abcde"} );
-
- connect();
- pair.waitForSteadyState( [ 1, 0 ], pair.right().host, true );
-
- sleep( 15000 );
-
- resetSlave( pair.left() );
-
- checkCount( pair.left(), 1000 );
- checkCount( pair.right(), 1000 );
- assert.eq( 0, pair.left().getDB( baseName ).getCollection( baseName ).find( {_id:"abcde"} ).count() );
-
- ports.forEach( function( x ) { stopMongoProgram( x ); } );
-
-}
-
-doTest(); \ No newline at end of file
diff --git a/jstests/repl/pair7.js b/jstests/repl/pair7.js
deleted file mode 100644
index 52ef91f..0000000
--- a/jstests/repl/pair7.js
+++ /dev/null
@@ -1,85 +0,0 @@
-// pairing with auth
-
-var baseName = "jstests_pair7test";
-
-setAdmin = function( n ) {
- n.getDB( "admin" ).addUser( "super", "super" );
- n.getDB( "local" ).addUser( "repl", "foo" );
- n.getDB( "local" ).system.users.findOne();
-}
-
-auth = function( n ) {
- return n.getDB( baseName ).auth( "test", "test" );
-}
-
-doTest = function( signal ) {
-
- ports = allocatePorts( 3 );
-
- m = startMongod( "--port", ports[ 1 ], "--dbpath", "/data/db/" + baseName + "-left", "--nohttpinterface", "--bind_ip", "127.0.0.1" );
- setAdmin( m );
- stopMongod( ports[ 1 ] );
-
- m = startMongod( "--port", ports[ 2 ], "--dbpath", "/data/db/" + baseName + "-right", "--nohttpinterface", "--bind_ip", "127.0.0.1" );
- setAdmin( m );
- stopMongod( ports[ 2 ] );
-
- a = new MongodRunner( ports[ 0 ], "/data/db/" + baseName + "-arbiter" );
- l = new MongodRunner( ports[ 1 ], "/data/db/" + baseName + "-left", "127.0.0.1:" + ports[ 2 ], "127.0.0.1:" + ports[ 0 ], [ "--auth" ] );
- r = new MongodRunner( ports[ 2 ], "/data/db/" + baseName + "-right", "127.0.0.1:" + ports[ 1 ], "127.0.0.1:" + ports[ 0 ], [ "--auth" ] );
-
- rp = new ReplPair( l, r, a );
- rp.start( true );
- rp.waitForSteadyState();
-
- rp.master().getDB( "admin" ).auth( "super", "super" );
- rp.master().getDB( baseName ).addUser( "test", "test" );
- auth( rp.master() ); // reauth
- assert.soon( function() { return auth( rp.slave() ); } );
- rp.slave().setSlaveOk();
-
- ma = rp.master().getDB( baseName ).a;
- ma.save( {} );
- sa = rp.slave().getDB( baseName ).a;
- assert.soon( function() { return 1 == sa.count(); } );
-
- rp.killNode( rp.slave(), signal );
- rp.waitForSteadyState( [ 1, null ] );
- ma.save( {} );
-
- rp.start( true );
- rp.waitForSteadyState();
- assert.soon( function() { return auth( rp.slave() ); } );
- rp.slave().setSlaveOk();
- sa = rp.slave().getDB( baseName ).a;
- assert.soon( function() { return 2 == sa.count(); } );
-
- ma.save( {a:1} );
- assert.soon( function() { return 1 == sa.count( {a:1} ); } );
-
- ma.update( {a:1}, {b:2} );
- assert.soon( function() { return 1 == sa.count( {b:2} ); } );
-
- ma.remove( {b:2} );
- assert.soon( function() { return 0 == sa.count( {b:2} ); } );
-
- rp.killNode( rp.master(), signal );
- rp.waitForSteadyState( [ 1, null ] );
- ma = sa;
- ma.save( {} );
-
- rp.start( true );
- rp.waitForSteadyState();
- assert.soon( function() { return auth( rp.slave() ); } );
- rp.slave().setSlaveOk();
- sa = rp.slave().getDB( baseName ).a;
- assert.soon( function() { return 3 == sa.count(); } );
-
- ma.save( {} );
- assert.soon( function() { return 4 == sa.count(); } );
-
- ports.forEach( function( x ) { stopMongod( x ); } );
-}
-
-doTest( 15 ); // SIGTERM
-doTest( 9 ); // SIGKILL
diff --git a/jstests/repl/repl2.js b/jstests/repl/repl2.js
index 42b0caf..b290c61 100644
--- a/jstests/repl/repl2.js
+++ b/jstests/repl/repl2.js
@@ -1,34 +1,43 @@
// Test resync command
soonCount = function( count ) {
- assert.soon( function() {
+ assert.soon( function() {
// print( "check count" );
// print( "count: " + s.getDB( baseName ).z.find().count() );
- return s.getDB("foo").a.find().count() == count;
- } );
+ return s.getDB("foo").a.find().count() == count;
+ } );
}
doTest = function( signal ) {
-
+ print("signal: "+signal);
+
var rt = new ReplTest( "repl2tests" );
// implicit small oplog makes slave get out of sync
- m = rt.start( true );
+ m = rt.start( true, { oplogSize : "1" } );
s = rt.start( false );
-
+
am = m.getDB("foo").a
-
+
am.save( { _id: new ObjectId() } );
soonCount( 1 );
assert.eq( 0, s.getDB( "admin" ).runCommand( { "resync" : 1 } ).ok );
rt.stop( false , signal );
-
+
big = new Array( 2000 ).toString();
for( i = 0; i < 1000; ++i )
am.save( { _id: new ObjectId(), i: i, b: big } );
s = rt.start( false , null , true );
- assert.soon( function() { return 1 == s.getDB( "admin" ).runCommand( { "resync" : 1 } ).ok; } );
+
+ print("earliest op in master: "+tojson(m.getDB("local").oplog.$main.find().sort({$natural:1}).limit(1).next()));
+ print("latest op on slave: "+tojson(s.getDB("local").sources.findOne()));
+
+ assert.soon( function() {
+ var result = s.getDB( "admin" ).runCommand( { "resync" : 1 } );
+ print("resync says: "+tojson(result));
+ return result.ok == 1;
+ } );
soonCount( 1001 );
assert.automsg( "m.getDB( 'local' ).getCollection( 'oplog.$main' ).stats().size > 0" );
@@ -36,7 +45,7 @@ doTest = function( signal ) {
as = s.getDB("foo").a
assert.eq( 1, as.find( { i: 0 } ).count() );
assert.eq( 1, as.find( { i: 999 } ).count() );
-
+
assert.eq( 0, s.getDB( "admin" ).runCommand( { "resync" : 1 } ).ok );
rt.stop();
diff --git a/jstests/repl/repl3.js b/jstests/repl/repl3.js
index d3c3848..5ace9b6 100644
--- a/jstests/repl/repl3.js
+++ b/jstests/repl/repl3.js
@@ -10,38 +10,42 @@ soonCount = function( count ) {
} );
}
-doTest = function( signal ) {
-
- rt = new ReplTest( "repl3tests" );
-
- m = rt.start( true );
- s = rt.start( false );
-
- am = m.getDB( baseName ).a
-
- am.save( { _id: new ObjectId() } );
- soonCount( 1 );
- rt.stop( false, signal );
-
- big = new Array( 2000 ).toString();
- for( i = 0; i < 1000; ++i )
- am.save( { _id: new ObjectId(), i: i, b: big } );
-
- s = rt.start( false, { autoresync: null }, true );
-
+doTest = function (signal) {
+
+ print("repl3.js doTest(" + signal + ")")
+
+ rt = new ReplTest("repl3tests");
+
+ m = rt.start(true);
+ s = rt.start(false);
+
+ am = m.getDB(baseName).a
+
+ am.save({ _id: new ObjectId() });
+ soonCount(1);
+ rt.stop(false, signal);
+
+ big = new Array(2000).toString();
+ for (i = 0; i < 1000; ++i)
+ am.save({ _id: new ObjectId(), i: i, b: big });
+
+ s = rt.start(false, { autoresync: null }, true);
+
// after SyncException, mongod waits 10 secs.
- sleep( 15000 );
-
+ sleep(15000);
+
// Need the 2 additional seconds timeout, since commands don't work on an 'allDead' node.
- soonCount( 1001 );
- as = s.getDB( baseName ).a
- assert.eq( 1, as.find( { i: 0 } ).count() );
- assert.eq( 1, as.find( { i: 999 } ).count() );
-
- assert.commandFailed( s.getDB( "admin" ).runCommand( { "resync" : 1 } ) );
+ soonCount(1001);
+ as = s.getDB(baseName).a
+ assert.eq(1, as.find({ i: 0 }).count());
+ assert.eq(1, as.find({ i: 999 }).count());
+
+ assert.commandFailed(s.getDB("admin").runCommand({ "resync": 1 }));
rt.stop();
}
doTest( 15 ); // SIGTERM
doTest( 9 ); // SIGKILL
+
+print("repl3.js OK")
diff --git a/jstests/repl/replacePeer1.js b/jstests/repl/replacePeer1.js
deleted file mode 100644
index b3743ce..0000000
--- a/jstests/repl/replacePeer1.js
+++ /dev/null
@@ -1,82 +0,0 @@
-// test replace peer on master
-
-var baseName = "jstests_replacepeer1test";
-
-ismaster = function( n ) {
- im = n.getDB( "admin" ).runCommand( { "ismaster" : 1 } );
-// print( "ismaster: " + tojson( im ) );
- assert( im );
- return im.ismaster;
-}
-
-var writeOneIdx = 0;
-
-writeOne = function( n ) {
- n.getDB( baseName ).z.save( { _id: new ObjectId(), i: ++writeOneIdx } );
-}
-
-getCount = function( n ) {
- return n.getDB( baseName ).z.find( { i: writeOneIdx } ).toArray().length;
-}
-
-checkWrite = function( m, s ) {
- writeOne( m );
- assert.eq( 1, getCount( m ) );
- s.setSlaveOk();
- assert.soon( function() {
- return 1 == getCount( s );
- } );
-}
-
-doTest = function( signal ) {
-
- ports = allocatePorts( 4 );
-
- a = new MongodRunner( ports[ 0 ], "/data/db/" + baseName + "-arbiter" );
- l = new MongodRunner( ports[ 1 ], "/data/db/" + baseName + "-left", "127.0.0.1:" + ports[ 3 ], "127.0.0.1:" + ports[ 0 ] );
- r = new MongodRunner( ports[ 3 ], "/data/db/" + baseName + "-right", "127.0.0.1:" + ports[ 1 ], "127.0.0.1:" + ports[ 0 ] );
-
- rp = new ReplPair( l, r, a );
- rp.start();
- rp.waitForSteadyState( [ 1, 0 ] );
- rightMaster = ( rp.master().host == rp.right().host );
-
- checkWrite( rp.master(), rp.slave() );
-
- rp.killNode( rp.slave(), signal );
-
- writeOne( rp.master() );
-
- assert.commandWorked( rp.master().getDB( "admin" ).runCommand( {replacepeer:1} ) );
-
- rp.killNode( rp.master(), signal );
- rp.killNode( rp.arbiter(), signal );
-
- if ( rightMaster ) {
- o = new MongodRunner( ports[ 2 ], "/data/db/" + baseName + "-left", "127.0.0.1:" + ports[ 3 ], "127.0.0.1:" + ports[ 0 ] );
- r = new MongodRunner( ports[ 3 ], "/data/db/" + baseName + "-right", "127.0.0.1:" + ports[ 2 ], "127.0.0.1:" + ports[ 0 ] );
- rp = new ReplPair( o, r, a );
- resetDbpath( "/data/db/" + baseName + "-left" );
- } else {
- l = new MongodRunner( ports[ 1 ], "/data/db/" + baseName + "-left", "127.0.0.1:" + ports[ 2 ], "127.0.0.1:" + ports[ 0 ] );
- o = new MongodRunner( ports[ 2 ], "/data/db/" + baseName + "-right", "127.0.0.1:" + ports[ 1 ], "127.0.0.1:" + ports[ 0 ] );
- rp = new ReplPair( l, o, a );
- resetDbpath( "/data/db/" + baseName + "-right" );
- }
-
- rp.start( true );
- rp.waitForSteadyState( [ 1, 0 ] );
-
- rp.slave().setSlaveOk();
- assert.eq( 2, rp.master().getDB( baseName ).z.find().toArray().length );
- assert.eq( 2, rp.slave().getDB( baseName ).z.find().toArray().length );
-
- checkWrite( rp.master(), rp.slave() );
- assert.eq( 3, rp.slave().getDB( baseName ).z.find().toArray().length );
-
- ports.forEach( function( x ) { stopMongod( x ); } );
-
-}
-
-doTest( 15 ); // SIGTERM
-doTest( 9 ); // SIGKILL
diff --git a/jstests/repl/replacePeer2.js b/jstests/repl/replacePeer2.js
deleted file mode 100644
index 33b054a..0000000
--- a/jstests/repl/replacePeer2.js
+++ /dev/null
@@ -1,86 +0,0 @@
-// test replace peer on slave
-
-var baseName = "jstests_replacepeer2test";
-
-ismaster = function( n ) {
- im = n.getDB( "admin" ).runCommand( { "ismaster" : 1 } );
-// print( "ismaster: " + tojson( im ) );
- assert( im );
- return im.ismaster;
-}
-
-var writeOneIdx = 0;
-
-writeOne = function( n ) {
- n.getDB( baseName ).z.save( { _id: new ObjectId(), i: ++writeOneIdx } );
-}
-
-getCount = function( n ) {
- return n.getDB( baseName ).z.find( { i: writeOneIdx } ).toArray().length;
-}
-
-checkWrite = function( m, s ) {
- writeOne( m );
- assert.eq( 1, getCount( m ) );
- s.setSlaveOk();
- assert.soon( function() {
- return 1 == getCount( s );
- } );
-}
-
-doTest = function( signal ) {
-
- ports = allocatePorts( 4 );
-
- a = new MongodRunner( ports[ 0 ], "/data/db/" + baseName + "-arbiter" );
- l = new MongodRunner( ports[ 1 ], "/data/db/" + baseName + "-left", "127.0.0.1:" + ports[ 3 ], "127.0.0.1:" + ports[ 0 ] );
- r = new MongodRunner( ports[ 3 ], "/data/db/" + baseName + "-right", "127.0.0.1:" + ports[ 1 ], "127.0.0.1:" + ports[ 0 ] );
-
- rp = new ReplPair( l, r, a );
- rp.start();
- rp.waitForSteadyState( [ 1, 0 ] );
- leftSlave = ( rp.slave().host == rp.left().host );
-
- checkWrite( rp.master(), rp.slave() );
-
- // allow slave to finish initial sync
- var res = rp.slave().getDB( "admin" ).runCommand( {replacepeer:1} );
- assert( res.ok , "replacepeer didn't finish: " + tojson( res ) );
-
- // Should not be saved to slave.
- writeOne( rp.master() );
- // Make sure there would be enough time to save to l if we hadn't called replacepeer.
- sleep( 10000 );
-
- ports.forEach( function( x ) { stopMongod( x, signal ); } );
-
- if ( leftSlave ) {
- l = new MongodRunner( ports[ 1 ], "/data/db/" + baseName + "-left", "127.0.0.1:" + ports[ 2 ], "127.0.0.1:" + ports[ 0 ] );
- o = new MongodRunner( ports[ 2 ], "/data/db/" + baseName + "-right", "127.0.0.1:" + ports[ 1 ], "127.0.0.1:" + ports[ 0 ] );
- rp = new ReplPair( l, o, a );
- resetDbpath( "/data/db/" + baseName + "-right" );
- } else {
- o = new MongodRunner( ports[ 2 ], "/data/db/" + baseName + "-left", "127.0.0.1:" + ports[ 3 ], "127.0.0.1:" + ports[ 0 ] );
- r = new MongodRunner( ports[ 3 ], "/data/db/" + baseName + "-right", "127.0.0.1:" + ports[ 2 ], "127.0.0.1:" + ports[ 0 ] );
- rp = new ReplPair( o, r, a );
- resetDbpath( "/data/db/" + baseName + "-left" );
- }
-
- rp.start( true );
- rp.waitForSteadyState( [ 1, 0 ] );
-
- rp.slave().setSlaveOk();
- assert.eq( 1, rp.slave().getDB( baseName ).z.find().toArray().length );
- assert.eq( 1, rp.master().getDB( baseName ).z.find().toArray().length );
-
- checkWrite( rp.master(), rp.slave() );
- assert.eq( 2, rp.slave().getDB( baseName ).z.find().toArray().length );
-
- ports.forEach( function( x ) { stopMongod( x ); } );
-
-}
-
-doTest( 15 ); // SIGTERM
-doTest( 9 ); // SIGKILL
-
-print("replace2Peer finishes");
diff --git a/jstests/repl/snapshot2.js b/jstests/repl/snapshot2.js
deleted file mode 100644
index 60b3531..0000000
--- a/jstests/repl/snapshot2.js
+++ /dev/null
@@ -1,72 +0,0 @@
-// Test SERVER-623 - starting repl peer from a new snapshot of master
-
-print("snapshot2.js 1 -----------------------------------------------------------");
-
-ports = allocatePorts( 3 );
-
-var baseName = "repl_snapshot2";
-var basePath = "/data/db/" + baseName;
-
-a = new MongodRunner( ports[ 0 ], basePath + "-arbiter" );
-l = new MongodRunner( ports[ 1 ], basePath + "-left", "127.0.0.1:" + ports[ 2 ], "127.0.0.1:" + ports[ 0 ] );
-r = new MongodRunner( ports[ 2 ], basePath + "-right", "127.0.0.1:" + ports[ 1 ], "127.0.0.1:" + ports[ 0 ] );
-
-print("snapshot2.js 2 -----------------------------------------------------------");
-
-rp = new ReplPair(l, r, a);
-rp.start();
-print("snapshot2.js 3 -----------------------------------------------------------");
-rp.waitForSteadyState();
-
-print("snapshot2.js 4 -----------------------------------------------------------");
-
-big = new Array( 2000 ).toString(); // overflow oplog, so test can't pass supriously
-rp.slave().setSlaveOk();
-print("snapshot2.js 5 -----------------------------------------------------------");
-for (i = 0; i < 500; ++i) {
- rp.master().getDB( baseName )[ baseName ].save( { _id: new ObjectId(), i: i, b: big } );
- if (i % 250 == 249) {
- function p() { return i + 1 == rp.slave().getDB(baseName)[baseName].count(); }
- try {
- assert.soon(p);
- } catch (e) {
- print("\n\n\nsnapshot2.js\ni+1:" + (i + 1));
- print("slave count:" + rp.slave().getDB(baseName)[baseName].count());
- sleep(2000);
- print(p());
- throw (e);
- }
- sleep( 10 ); // give master a chance to grab a sync point - have such small oplogs the master log might overflow otherwise
- }
-}
-print("snapshot2.js 6 -----------------------------------------------------------");
-
-rp.master().getDB( "admin" ).runCommand( {fsync:1,lock:1} );
-leftMaster = ( rp.master().host == rp.left().host );
-rp.killNode( rp.slave() );
-if ( leftMaster ) {
- copyDbpath( basePath + "-left", basePath + "-right" );
-} else {
- copyDbpath( basePath + "-right", basePath + "-left" );
-}
-rp.master().getDB( "admin" ).$cmd.sys.unlock.findOne();
-rp.killNode( rp.master() );
-
-clearRawMongoProgramOutput();
-
-rp.right_.extraArgs_ = [ "--fastsync" ];
-rp.left_.extraArgs_ = [ "--fastsync" ];
-
-rp.start( true );
-rp.waitForSteadyState();
-assert.eq( 500, rp.master().getDB( baseName )[ baseName ].count() );
-rp.slave().setSlaveOk();
-assert.eq( 500, rp.slave().getDB( baseName )[ baseName ].count() );
-rp.master().getDB( baseName )[ baseName ].save( {i:500} );
-assert.soon( function() { return 501 == rp.slave().getDB( baseName )[ baseName ].count(); } );
-
-assert( !rawMongoProgramOutput().match( /resync/ ) );
-assert(!rawMongoProgramOutput().match(/SyncException/));
-
-print("snapshot2.js SUCCESS ----------------");
-
diff --git a/jstests/repl/snapshot3.js b/jstests/repl/snapshot3.js
deleted file mode 100644
index 02955e5..0000000
--- a/jstests/repl/snapshot3.js
+++ /dev/null
@@ -1,53 +0,0 @@
-// Test SERVER-623 - starting repl peer from a new snapshot of slave
-
-ports = allocatePorts( 3 );
-
-var baseName = "repl_snapshot3";
-var basePath = "/data/db/" + baseName;
-
-a = new MongodRunner( ports[ 0 ], basePath + "-arbiter" );
-l = new MongodRunner( ports[ 1 ], basePath + "-left", "127.0.0.1:" + ports[ 2 ], "127.0.0.1:" + ports[ 0 ] );
-r = new MongodRunner( ports[ 2 ], basePath + "-right", "127.0.0.1:" + ports[ 1 ], "127.0.0.1:" + ports[ 0 ] );
-
-rp = new ReplPair( l, r, a );
-rp.start();
-rp.waitForSteadyState();
-
-big = new Array( 2000 ).toString(); // overflow oplog, so test can't pass supriously
-rp.slave().setSlaveOk();
-for( i = 0; i < 500; ++i ) {
- rp.master().getDB( baseName )[ baseName ].save( { _id: new ObjectId(), i: i, b: big } );
- if ( i % 250 == 249 ) {
- assert.soon( function() { return i+1 == rp.slave().getDB( baseName )[ baseName ].count(); } );
- sleep( 10 ); // give master a chance to grab a sync point - have such small oplogs the master log might overflow otherwise
- }
-}
-
-rp.slave().getDB( "admin" ).runCommand( {fsync:1,lock:1} );
-leftSlave = ( rp.slave().host == rp.left().host );
-rp.killNode( rp.master() );
-if ( leftSlave ) {
- copyDbpath( basePath + "-left", basePath + "-right" );
-} else {
- copyDbpath( basePath + "-right", basePath + "-left" );
-}
-rp.slave().getDB( "admin" ).$cmd.sys.unlock.findOne();
-rp.killNode( rp.slave() );
-
-clearRawMongoProgramOutput();
-
-rp.right_.extraArgs_ = [ "--fastsync" ];
-rp.left_.extraArgs_ = [ "--fastsync" ];
-
-rp.start( true );
-rp.waitForSteadyState();
-assert.eq( 500, rp.master().getDB( baseName )[ baseName ].count() );
-rp.slave().setSlaveOk();
-assert.eq( 500, rp.slave().getDB( baseName )[ baseName ].count() );
-rp.master().getDB( baseName )[ baseName ].save( {i:500} );
-assert.soon( function() { return 501 == rp.slave().getDB( baseName )[ baseName ].count(); } );
-
-assert( !rawMongoProgramOutput().match( new RegExp( "resync.*" + baseName + ".*\n" ) ) , "last1" );
-assert( !rawMongoProgramOutput().match( /SyncException/ ) , "last2" );
-
-print("snapshot3.js finishes");
diff --git a/jstests/replsets/auth1.js b/jstests/replsets/auth1.js
index 60e4b95..71ab2d9 100644
--- a/jstests/replsets/auth1.js
+++ b/jstests/replsets/auth1.js
@@ -3,17 +3,27 @@
load("jstests/replsets/rslib.js");
var name = "rs_auth1";
-var port = allocatePorts(4);
-var path = "jstests/replsets/";
+var port = allocatePorts(5);
+var path = "jstests/libs/";
+
+
+print("try starting mongod with auth");
+var m = runMongoProgram( "mongod", "--auth", "--port", port[4], "--dbpath", "/data/db/wrong-auth");
+
+assert.throws(function() {
+ m.getDB("local").auth("__system", "");
+});
+
+stopMongod(port[4]);
+
-
print("reset permissions");
run("chmod", "644", path+"key1");
run("chmod", "644", path+"key2");
print("try starting mongod");
-var m = runMongoProgram( "mongod", "--keyFile", path+"key1", "--port", port[0], "--dbpath", "/data/db/" + name);
+m = runMongoProgram( "mongod", "--keyFile", path+"key1", "--port", port[0], "--dbpath", "/data/db/" + name);
print("should fail with wrong permissions");
@@ -81,6 +91,10 @@ function doQueryOn(p) {
doQueryOn(slave);
master.adminCommand({logout:1});
+
+print("unauthorized:");
+printjson(master.adminCommand({replSetGetStatus : 1}));
+
doQueryOn(master);
@@ -125,11 +139,12 @@ master.auth("bar", "baz");
for (var i=0; i<1000; i++) {
master.foo.insert({x:i, foo : "bar"});
}
-master.runCommand({getlasterror:1, w:3, wtimeout:60000});
+var result = master.runCommand({getlasterror:1, w:2, wtimeout:60000});
+printjson(result);
print("resync");
-rs.restart(0);
+rs.restart(0, {"keyFile" : path+"key1"});
print("add some more data 2");
@@ -159,7 +174,7 @@ master.getSisterDB("admin").auth("foo", "bar");
print("shouldn't ever sync");
-for (var i = 0; i<30; i++) {
+for (var i = 0; i<10; i++) {
print("iteration: " +i);
var results = master.adminCommand({replSetGetStatus:1});
printjson(results);
@@ -177,9 +192,15 @@ conn = new MongodRunner(port[3], "/data/db/"+name+"-3", null, null, ["--replSet"
conn.start();
wait(function() {
+ try {
var results = master.adminCommand({replSetGetStatus:1});
printjson(results);
return results.members[3].state == 2;
+ }
+ catch (e) {
+ print(e);
+ }
+ return false;
});
print("make sure it has the config, too");
diff --git a/jstests/replsets/cloneDb.js b/jstests/replsets/cloneDb.js
index 6d2d0f3..2519c09 100644
--- a/jstests/replsets/cloneDb.js
+++ b/jstests/replsets/cloneDb.js
@@ -6,34 +6,36 @@ doTest = function( signal ) {
var N = 2000
- // ~1KB string
+ print("~1KB string");
var Text = ''
for (var i = 0; i < 40; i++)
Text += 'abcdefghijklmnopqrstuvwxyz'
- // Create replica set
+ print("Create replica set");
var repset = new ReplicaSet ('testSet', 3) .begin()
var master = repset.getMaster()
var db1 = master.getDB('test')
- // Insert data
+ print("Insert data");
for (var i = 0; i < N; i++) {
db1['foo'].insert({x: i, text: Text})
db1.getLastError(2) // wait to be copied to at least one secondary
}
- // Create single server
+ print("Create single server");
var solo = new Server ('singleTarget')
var soloConn = solo.begin()
+ soloConn.getDB("admin").runCommand({setParameter:1,logLevel:5});
+
var db2 = soloConn.getDB('test')
- // Clone db from replica set to single server
+ print("Clone db from replica set to single server");
db2.cloneDatabase (repset.getURL())
- // Confirm clone worked
+ print("Confirm clone worked");
assert.eq (Text, db2['foo'] .findOne({x: N-1}) ['text'], 'cloneDatabase failed (test1)')
- // Now test the reverse direction
+ print("Now test the reverse direction");
db1 = master.getDB('test2')
db2 = soloConn.getDB('test2')
for (var i = 0; i < N; i++) {
@@ -43,7 +45,7 @@ doTest = function( signal ) {
db1.cloneDatabase (solo.host())
assert.eq (Text, db2['foo'] .findOne({x: N-1}) ['text'], 'cloneDatabase failed (test2)')
- // Shut down replica set and single server
+ print("Shut down replica set and single server");
solo.end()
repset.stopSet( signal )
}
diff --git a/jstests/replsets/config1.js b/jstests/replsets/config1.js
deleted file mode 100644
index 748ce8f..0000000
--- a/jstests/replsets/config1.js
+++ /dev/null
@@ -1,21 +0,0 @@
-doTest = function( signal ) {
- var name = 'config1';
-
- var replTest = new ReplSetTest( {name: name, nodes: 3} );
- var nodes = replTest.startSet();
-
- var config = replTest.getReplSetConfig();
- config.settings = {"heartbeatSleep" : .5, heartbeatTimeout : .8};
-
- replTest.initiate(config);
-
- // Call getMaster to return a reference to the node that's been
- // elected master.
- var master = replTest.getMaster();
-
- config = master.getDB("local").system.replset.findOne();
- assert.eq(config.settings.heartbeatSleep, .5);
- assert.eq(config.settings.heartbeatTimeout, .8);
-};
-
-doTest(15);
diff --git a/jstests/replsets/downstream.js b/jstests/replsets/downstream.js
new file mode 100755
index 0000000..795e667
--- /dev/null
+++ b/jstests/replsets/downstream.js
@@ -0,0 +1,36 @@
+// BUG: [SERVER-1768] replica set getlasterror {w: 2} after 2000
+// inserts hangs while secondary servers log "replSet error RS102 too stale to catch up" every once in a while
+
+function newReplicaSet (name, numServers) {
+ var rs = new ReplSetTest({name: name, nodes: numServers})
+ rs.startSet()
+ rs.initiate()
+ rs.awaitReplication()
+ return rs
+}
+
+function go() {
+var N = 2000
+
+// ~1KB string
+var Text = ''
+for (var i = 0; i < 40; i++)
+ Text += 'abcdefghijklmnopqrstuvwxyz'
+
+// Create replica set of 3 servers
+var repset = newReplicaSet('repset', 3)
+var conn = repset.getMaster()
+var db = conn.getDB('test')
+
+// Add data to it
+for (var i = 0; i < N; i++)
+ db['foo'].insert({x: i, text: Text})
+
+// wait to be copied to at least one secondary (BUG hangs here)
+db.getLastError(2)
+
+print('getlasterror_w2.js SUCCESS')
+}
+
+// turn off until fixed
+//go();
diff --git a/jstests/replsets/fastsync.js b/jstests/replsets/fastsync.js
index d7c3905..1c9c215 100644
--- a/jstests/replsets/fastsync.js
+++ b/jstests/replsets/fastsync.js
@@ -22,7 +22,7 @@ var wait = function(f) {
}
var reconnect = function(a) {
- wait(function() {
+ wait(function() {
try {
a.getDB("foo").bar.stats();
return true;
@@ -33,7 +33,7 @@ var reconnect = function(a) {
});
};
-ports = allocatePorts( 3 );
+ports = allocatePorts( 4 );
var basename = "jstests_fastsync";
var basePath = "/data/db/" + basename;
@@ -48,7 +48,7 @@ var admin = p.getDB("admin");
var foo = p.getDB("foo");
var local = p.getDB("local");
-var config = {_id : basename, members : [{_id : 0, host : hostname+":"+ports[0]}]};
+var config = {_id : basename, members : [{_id : 0, host : hostname+":"+ports[0], priority:2}]};
printjson(config);
var result = admin.runCommand({replSetInitiate : config});
print("result:");
@@ -59,10 +59,19 @@ while (count < 10 && result.ok != 1) {
count++;
sleep(2000);
result = admin.runCommand({replSetInitiate : config});
-}
+}
assert(result.ok, tojson(result));
-assert.soon(function() { return admin.runCommand({isMaster:1}).ismaster; });
+assert.soon(function() { result = false;
+ try {
+ result = admin.runCommand({isMaster:1}).ismaster;
+ }
+ catch(e) {
+ print(e);
+ return false;
+ }
+ return result;
+ });
print("1");
for (var i=0; i<100000; i++) {
@@ -73,45 +82,113 @@ print("total in foo: "+foo.bar.count());
print("2");
admin.runCommand( {fsync:1,lock:1} );
-copyDbpath( basePath + "-p", basePath + "-s" );
+copyDbpath( basePath + "-p", basePath + "-s"+1 );
admin.$cmd.sys.unlock.findOne();
-
print("3");
-var sargs = new MongodRunner( ports[ 1 ], basePath + "-s", false, false,
+var startSlave = function(n) {
+ var sargs = new MongodRunner( ports[ n ], basePath + "-s"+n, false, false,
["--replSet", basename, "--fastsync",
"--oplogSize", 2], {no_bind : true} );
-var reuseData = true;
-sargs.start(reuseData);
+ var reuseData = true;
+ var conn = sargs.start(reuseData);
+
+ config = local.system.replset.findOne();
+ config.version++;
+ config.members.push({_id:n, host:hostname+":"+ports[n]});
+
+ result = admin.runCommand({replSetReconfig : config});
+ printjson(result);
+ assert(result.ok, "reconfig worked");
+ reconnect(p);
+
+ print("4");
+ var status = admin.runCommand({replSetGetStatus : 1});
+ var count = 0;
+ while (status.members[n].state != 2 && count < 200) {
+ print("not a secondary yet");
+ if (count % 10 == 0) {
+ printjson(status);
+ }
+ assert(!status.members[n].errmsg || !status.members[n].errmsg.match("^initial sync cloning db"));
-config = local.system.replset.findOne();
-config.version++;
-config.members.push({_id:1, host:hostname+":"+ports[1]});
+ sleep(1000);
-result = admin.runCommand({replSetReconfig : config});
-assert(result.ok, "reconfig worked");
-reconnect(p);
+ // disconnection could happen here
+ try {
+ status = admin.runCommand({replSetGetStatus : 1});
+ }
+ catch (e) {
+ print(e);
+ }
+ count++;
+ }
-print("4");
-var status = admin.runCommand({replSetGetStatus : 1});
-var count = 0;
-while (status.members[1].state != 2 && count < 200) {
- print("not a secondary yet");
- if (count % 10 == 0) {
- printjson(status);
- }
- assert(!status.members[1].errmsg || !status.members[1].errmsg.match("^initial sync cloning db"));
-
- sleep(1000);
-
- // disconnection could happen here
- try {
- status = admin.runCommand({replSetGetStatus : 1});
- }
- catch (e) {
- print(e);
- }
- count++;
+ assert.eq(status.members[n].state, 2);
+
+ assert.soon(function() {
+ return admin.runCommand({isMaster : 1}).ismaster;
+ });
+
+ admin.foo.insert({x:1});
+ assert.soon(function() {
+ var last = local.oplog.rs.find().sort({$natural:-1}).limit(1).next();
+ var cur = conn.getDB("local").oplog.rs.find().sort({$natural:-1}).limit(1).next();
+ print("last: "+tojson(last)+" cur: "+tojson(cur));
+ return cur != null && last != null && cur.ts.t == last.ts.t && cur.ts.i == last.ts.i;
+ });
+
+ return conn;
+};
+
+var s1 = startSlave(1);
+
+var me1 = s1.getDB("local").me.findOne();
+
+print("me: " +me1._id);
+assert(me1._id != null);
+
+print("5");
+s1.getDB("admin").runCommand( {fsync:1,lock:1} );
+copyDbpath( basePath + "-s1", basePath + "-s2" );
+s1.getDB("admin").$cmd.sys.unlock.findOne();
+
+var s2 = startSlave(2);
+
+var me2 = s2.getDB("local").me.findOne();
+
+print("me: " +me2._id);
+assert(me1._id != me2._id);
+
+print("restart member with a different port and make it a new set");
+try {
+ p.getDB("admin").runCommand({shutdown:1});
+}
+catch(e) {
+ print("good, shutting down: " +e);
}
+sleep(10000);
+
+pargs = new MongodRunner( ports[ 3 ], basePath + "-p", false, false,
+ ["--replSet", basename, "--oplogSize", 2],
+ {no_bind : true} );
+p = pargs.start(true);
+
+printjson(p.getDB("admin").runCommand({replSetGetStatus:1}));
+
+p.getDB("admin").runCommand({replSetReconfig : {
+ _id : basename,
+ members : [{_id:0, host : hostname+":"+ports[3]}]
+ }, force : true});
+
+print("start waiting for primary...");
+assert.soon(function() {
+ try {
+ return p.getDB("admin").runCommand({isMaster : 1}).ismaster;
+ }
+ catch(e) {
+ print(e);
+ }
+ return false;
+ }, "waiting for master", 60000);
-assert.eq(status.members[1].state, 2);
diff --git a/jstests/replsets/initial_sync1.js b/jstests/replsets/initial_sync1.js
index df978c4..4cfd606 100644
--- a/jstests/replsets/initial_sync1.js
+++ b/jstests/replsets/initial_sync1.js
@@ -95,12 +95,11 @@ replTest.stop(1);
print("8. Eventually it should become a secondary");
print("if initial sync has started, this will cause it to fail and sleep for 5 minutes");
-sleep(5*60*1000);
wait(function() {
var status = admin_s2.runCommand({replSetGetStatus:1});
occasionally(function() { printjson(status); });
return status.members[2].state == 2;
- });
+ }, 350);
print("9. Bring #2 back up");
@@ -122,5 +121,5 @@ for (var i=0; i<10000; i++) {
print("11. Everyone happy eventually");
-replTest.awaitReplication();
+replTest.awaitReplication(300000);
diff --git a/jstests/replsets/initial_sync3.js b/jstests/replsets/initial_sync3.js
index 471aa16..7f2af94 100644
--- a/jstests/replsets/initial_sync3.js
+++ b/jstests/replsets/initial_sync3.js
@@ -43,14 +43,14 @@ wait(function() {
if (!status.members) {
return false;
}
-
+
for (i=0; i<7; i++) {
if (status.members[i].state != 1 && status.members[i].state != 2) {
return false;
}
}
return true;
-
+
});
replTest.awaitReplication();
@@ -66,6 +66,7 @@ rs2.initiate();
master = rs2.getMaster();
var config = master.getDB("local").system.replset.findOne();
config.version++;
+config.members[0].priority = 2;
config.members[0].initialSync = {state : 2};
config.members[1].initialSync = {state : 1};
try {
@@ -75,12 +76,34 @@ catch(e) {
print("trying to reconfigure: "+e);
}
-master = rs2.getMaster();
-config = master.getDB("local").system.replset.findOne();
+// wait for a heartbeat, too, just in case sync happens before hb
+assert.soon(function() {
+ try {
+ for (var n in rs2.nodes) {
+ if (rs2.nodes[n].getDB("local").system.replset.findOne().version != 2) {
+ return false;
+ }
+ }
+ }
+ catch (e) {
+ return false;
+ }
+ return true;
+});
+
+rs2.awaitReplication();
+
+// test partitioning
+master = rs2.bridge();
+rs2.partition(0, 2);
+
+master.getDB("foo").bar.baz.insert({x:1});
+rs2.awaitReplication();
-assert(typeof(config.members[0].initialSync) == "object");
-assert.eq(config.members[0].initialSync.state, 2);
-assert.eq(config.members[1].initialSync.state, 1);
+master.getDB("foo").bar.baz.insert({x:2});
+var x = master.getDB("foo").runCommand({getLastError : 1, w : 3, wtimeout : 5000});
+printjson(x);
+assert.eq(null, x.err);
rs2.stopSet();
diff --git a/jstests/replsets/maintenance.js b/jstests/replsets/maintenance.js
new file mode 100644
index 0000000..5b068cd
--- /dev/null
+++ b/jstests/replsets/maintenance.js
@@ -0,0 +1,32 @@
+
+
+var replTest = new ReplSetTest( {name: 'unicomplex', nodes: 3} );
+var conns = replTest.startSet();
+replTest.initiate();
+
+// Make sure we have a master
+var master = replTest.getMaster();
+
+for (i=0;i<10000; i++) { master.getDB("bar").foo.insert({x:1,y:i,abc:123,str:"foo bar baz"}); }
+for (i=0;i<1000; i++) { master.getDB("bar").foo.update({y:i},{$push :{foo : "barrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrr"}}); }
+
+replTest.awaitReplication();
+
+assert.soon(function() { return conns[2].getDB("admin").isMaster().secondary; });
+
+join = startParallelShell( "db.getSisterDB('bar').runCommand({compact : 'foo'});", replTest.ports[2] );
+
+print("check secondary goes to recovering");
+assert.soon(function() { return !conns[2].getDB("admin").isMaster().secondary; });
+
+print("joining");
+join();
+
+print("check secondary becomes a secondary again");
+var x = 0;
+assert.soon(function() {
+ var im = conns[2].getDB("admin").isMaster();
+ if (x++ % 5 == 0) printjson(im);
+ return im.secondary;
+});
+
diff --git a/jstests/replsets/majority.js b/jstests/replsets/majority.js
new file mode 100644
index 0000000..6df1a41
--- /dev/null
+++ b/jstests/replsets/majority.js
@@ -0,0 +1,60 @@
+var num = 5;
+var host = getHostName();
+var name = "tags";
+var timeout = 10000;
+
+var replTest = new ReplSetTest( {name: name, nodes: num, startPort:31000} );
+var nodes = replTest.startSet();
+var port = replTest.ports;
+replTest.initiate({_id : name, members :
+ [
+ {_id:0, host : host+":"+port[0], priority : 2},
+ {_id:1, host : host+":"+port[1]},
+ {_id:2, host : host+":"+port[2]},
+ {_id:3, host : host+":"+port[3], arbiterOnly : true},
+ {_id:4, host : host+":"+port[4], arbiterOnly : true},
+ ],
+ });
+
+replTest.awaitReplication();
+replTest.bridge();
+
+var testInsert = function() {
+ master.getDB("foo").bar.insert({x:1});
+ var result = master.getDB("foo").runCommand({getLastError:1, w:"majority", wtimeout:timeout});
+ printjson(result);
+ return result;
+};
+
+var master = replTest.getMaster();
+
+print("get back in the groove");
+testInsert();
+replTest.awaitReplication();
+
+print("makes sure majority works");
+assert.eq(testInsert().err, null);
+
+print("setup: 0,1 | 2,3,4");
+replTest.partition(0,2);
+replTest.partition(0,3);
+replTest.partition(0,4);
+replTest.partition(1,2);
+replTest.partition(1,3);
+replTest.partition(1,4);
+
+print("make sure majority doesn't work");
+// primary should now be 2
+master = replTest.getMaster();
+assert.eq(testInsert().err, "timeout");
+
+print("bring set back together");
+replTest.unPartition(0,2);
+replTest.unPartition(0,3);
+replTest.unPartition(1,4);
+
+master = replTest.getMaster();
+
+print("make sure majority works");
+assert.eq(testInsert().err, null);
+
diff --git a/jstests/replsets/randomcommands1.js b/jstests/replsets/randomcommands1.js
deleted file mode 100644
index c451e74..0000000
--- a/jstests/replsets/randomcommands1.js
+++ /dev/null
@@ -1,29 +0,0 @@
-
-replTest = new ReplSetTest( {name: 'randomcommands1', nodes: 3} );
-
-nodes = replTest.startSet();
-replTest.initiate();
-
-master = replTest.getMaster();
-slaves = replTest.liveNodes.slaves;
-printjson(replTest.liveNodes);
-
-db = master.getDB("foo")
-t = db.foo
-
-ts = slaves.map( function(z){ z.setSlaveOk(); return z.getDB( "foo" ).foo; } )
-
-t.save({a: 1000});
-t.ensureIndex( { a : 1 } )
-
-db.getLastError( 3 , 30000 )
-
-ts.forEach( function(z){ assert.eq( 2 , z.getIndexKeys().length , "A " + z.getMongo() ); } )
-
-t.reIndex()
-
-db.getLastError( 3 , 30000 )
-ts.forEach( function(z){ assert.eq( 2 , z.getIndexKeys().length , "A " + z.getMongo() ); } )
-
-replTest.stopSet( 15 )
-
diff --git a/jstests/replsets/reconfig.js b/jstests/replsets/reconfig.js
new file mode 100644
index 0000000..b7dca03
--- /dev/null
+++ b/jstests/replsets/reconfig.js
@@ -0,0 +1,69 @@
+
+// try reconfiguring with servers down
+
+var replTest = new ReplSetTest({ name: 'testSet', nodes: 5 });
+var nodes = replTest.startSet();
+replTest.initiate();
+
+var master = replTest.getMaster();
+
+print("initial sync");
+master.getDB("foo").bar.insert({X:1});
+replTest.awaitReplication();
+
+print("stopping 3 & 4");
+replTest.stop(3);
+replTest.stop(4);
+
+print("reconfiguring");
+master = replTest.getMaster();
+var config = master.getDB("local").system.replset.findOne();
+var oldVersion = config.version++;
+config.members[0].votes = 2;
+config.members[3].votes = 2;
+try {
+ master.getDB("admin").runCommand({replSetReconfig : config});
+}
+catch(e) {
+ print(e);
+}
+
+var config = master.getDB("local").system.replset.findOne();
+assert.eq(oldVersion+1, config.version);
+
+
+print("0 & 3 up; 1, 2, 4 down");
+replTest.restart(3);
+
+// in case 0 isn't master
+replTest.awaitReplication();
+
+replTest.stop(1);
+replTest.stop(2);
+
+print("try to reconfigure with a 'majority' down");
+oldVersion = config.version;
+config.version++;
+master = replTest.getMaster();
+try {
+ master.getDB("admin").runCommand({replSetReconfig : config});
+}
+catch (e) {
+ print(e);
+}
+
+var config = master.getDB("local").system.replset.findOne();
+assert.eq(oldVersion+1, config.version);
+
+replTest.stopSet();
+
+replTest2 = new ReplSetTest({name : 'testSet2', nodes : 1});
+nodes = replTest2.startSet();
+
+result = nodes[0].getDB("admin").runCommand({replSetInitiate : {_id : "testSet2", members : [
+ {_id : 0, tags : ["member0"]}
+ ]}});
+
+assert(result.errmsg.match(/bad or missing host field/));
+
+replTest2.stopSet();
diff --git a/jstests/replsets/remove1.js b/jstests/replsets/remove1.js
index ebd17d6..f93fe9e 100644
--- a/jstests/replsets/remove1.js
+++ b/jstests/replsets/remove1.js
@@ -16,7 +16,7 @@ var host = getHostName();
print("Start set with three nodes");
-var replTest = new ReplSetTest( {name: name, nodes: 3} );
+var replTest = new ReplSetTest( {name: name, nodes: 2} );
var nodes = replTest.startSet();
replTest.initiate();
var master = replTest.getMaster();
@@ -28,85 +28,44 @@ master.getDB("foo").bar.baz.insert({x:1});
replTest.awaitReplication();
-print("Remove slave2");
+print("Remove slaves");
var config = replTest.getReplSetConfig();
config.members.pop();
config.version = 2;
-try {
- master.getDB("admin").runCommand({replSetReconfig:config});
-}
-catch(e) {
- print(e);
-}
-reconnect(master);
-
-
-print("Remove slave1");
-config.members.pop();
-config.version = 3;
-try {
- master.getDB("admin").runCommand({replSetReconfig:config});
-}
-catch(e) {
- print(e);
-}
-reconnect(master);
-
-print("sleeping 1");
-sleep(10000);
-// these are already down, but this clears their ports from memory so that they
-// can be restarted later
-stopMongod(replTest.getPort(1));
-stopMongod(replTest.getPort(2));
-
-
-print("Bring slave1 back up");
-var paths = [ replTest.getPath(1), replTest.getPath(2) ];
-var ports = allocatePorts(2, replTest.getPort(2)+1);
-var args = ["mongod", "--port", ports[0], "--dbpath", paths[0], "--noprealloc", "--smallfiles", "--rest"];
-var conn = startMongoProgram.apply( null, args );
-conn.getDB("local").system.replset.remove();
-printjson(conn.getDB("local").runCommand({getlasterror:1}));
-print(conn);
-print("sleeping 2");
-sleep(10000);
-stopMongod(ports[0]);
-
-replTest.restart(1);
-
-
-print("Bring slave2 back up");
-args[2] = ports[1];
-args[4] = paths[1];
-conn = startMongoProgram.apply( null, args );
-conn.getDB("local").system.replset.remove();
-print("path: "+paths[1]);
-print("sleeping 3");
-sleep(10000);
-stopMongod(ports[1]);
-
-replTest.restart(2);
-sleep(10000);
-
-
-print("Add them back as slaves");
+assert.soon(function() {
+ try {
+ master.getDB("admin").runCommand({replSetReconfig:config});
+ }
+ catch(e) {
+ print(e);
+ }
+
+ reconnect(master);
+ reconnect(replTest.nodes[1]);
+ var c = master.getDB("local").system.replset.findOne();
+ return c.version == 2;
+ });
+
+print("Add it back as a slave");
config.members.push({_id:1, host : host+":"+replTest.getPort(1)});
-config.members.push({_id:2, host : host+":"+replTest.getPort(2)});
-config.version = 4;
+config.version = 3;
+printjson(config);
wait(function() {
try {
- master.getDB("admin").runCommand({replSetReconfig:config});
+ master.getDB("admin").runCommand({replSetReconfig:config});
}
catch(e) {
- print(e);
+ print(e);
}
reconnect(master);
+ printjson(master.getDB("admin").runCommand({replSetGetStatus:1}));
master.setSlaveOk();
var newConfig = master.getDB("local").system.replset.findOne();
- return newConfig.version == 4;
- });
+ print( "newConfig: " + tojson(newConfig) );
+ return newConfig.version == 3;
+} , "wait1" );
print("Make sure everyone's secondary");
@@ -115,18 +74,49 @@ wait(function() {
occasionally(function() {
printjson(status);
});
-
- if (!status.members || status.members.length != 3) {
+
+ if (!status.members || status.members.length != 2) {
return false;
}
- for (var i = 0; i<3; i++) {
+ for (var i = 0; i<2; i++) {
if (status.members[i].state != 1 && status.members[i].state != 2) {
return false;
}
}
return true;
- });
+} , "wait2" );
+
+
+print("reconfig with minority");
+replTest.stop(1);
+
+assert.soon(function() {
+ try {
+ return master.getDB("admin").runCommand({isMaster : 1}).secondary;
+ }
+ catch(e) {
+ print("trying to get master: "+e);
+ }
+});
+
+config.version = 4;
+config.members.pop();
+try {
+ master.getDB("admin").runCommand({replSetReconfig : config, force : true});
+}
+catch(e) {
+ print(e);
+}
+
+reconnect(master);
+assert.soon(function() {
+ return master.getDB("admin").runCommand({isMaster : 1}).ismaster;
+});
+
+config = master.getDB("local").system.replset.findOne();
+printjson(config);
+assert(config.version > 4);
replTest.stopSet();
diff --git a/jstests/replsets/replset1.js b/jstests/replsets/replset1.js
index 5ac94e7..6387c5d 100644
--- a/jstests/replsets/replset1.js
+++ b/jstests/replsets/replset1.js
@@ -108,6 +108,28 @@ doTest = function( signal ) {
assert.eq( 1000 , count.n , "slave count wrong: " + slave );
});
+ // last error
+ master = replTest.getMaster();
+ slaves = replTest.liveNodes.slaves;
+ printjson(replTest.liveNodes);
+
+ db = master.getDB("foo")
+ t = db.foo
+
+ ts = slaves.map( function(z){ z.setSlaveOk(); return z.getDB( "foo" ).foo; } )
+
+ t.save({a: 1000});
+ t.ensureIndex( { a : 1 } )
+
+ db.getLastError( 3 , 30000 )
+
+ ts.forEach( function(z){ assert.eq( 2 , z.getIndexKeys().length , "A " + z.getMongo() ); } )
+
+ t.reIndex()
+
+ db.getLastError( 3 , 30000 )
+ ts.forEach( function(z){ assert.eq( 2 , z.getIndexKeys().length , "A " + z.getMongo() ); } )
+
// Shut down the set and finish the test.
replTest.stopSet( signal );
}
diff --git a/jstests/replsets/replset3.js b/jstests/replsets/replset3.js
index faa0627..ba08eac 100644
--- a/jstests/replsets/replset3.js
+++ b/jstests/replsets/replset3.js
@@ -29,7 +29,7 @@ doTest = function (signal) {
// Step down master. Note: this may close our connection!
try {
- master.getDB("admin").runCommand({ replSetStepDown: true });
+ master.getDB("admin").runCommand({ replSetStepDown: true, force: 1 });
} catch (err) {
print("caught: " + err + " on stepdown");
}
diff --git a/jstests/replsets/replset5.js b/jstests/replsets/replset5.js
index 6a7d8a5..67ce2d7 100644
--- a/jstests/replsets/replset5.js
+++ b/jstests/replsets/replset5.js
@@ -23,51 +23,63 @@ doTest = function (signal) {
master.getDB("barDB").bar.save({ a: 1 });
replTest.awaitReplication();
- // These writes should be replicated immediately
- var docNum = 5000;
- for(var n=0; n<docNum; n++) {
- master.getDB(testDB).foo.insert({ n: n });
- }
+ // These writes should be replicated immediately
+ var docNum = 5000;
+ for (var n = 0; n < docNum; n++) {
+ master.getDB(testDB).foo.insert({ n: n });
+ }
- // If you want to test failure, just add values for w and wtimeout
- // to the following command. This will override the default set above and
- // prevent replication from happening in time for the count tests below.
- master.getDB("admin").runCommand({getlasterror: 1});
+ // should use the configured last error defaults from above, that's what we're testing.
+ //
+ // If you want to test failure, just add values for w and wtimeout (e.g. w=1)
+ // to the following command. This will override the default set above and
+ // prevent replication from happening in time for the count tests below.
+ //
+ var result = master.getDB("admin").runCommand({ getlasterror: 1 });
+ print("replset5.js getlasterror result:");
+ printjson(result);
+
+ if (result.err == "timeout") {
+ print("\WARNING getLastError timed out and should not have.\nThis machine seems extremely slow. Stopping test without failing it\n")
+ replTest.stopSet(signal);
+ print("\WARNING getLastError timed out and should not have.\nThis machine seems extremely slow. Stopping test without failing it\n")
+ return;
+ }
var slaves = replTest.liveNodes.slaves;
slaves[0].setSlaveOk();
slaves[1].setSlaveOk();
- print("Testing slave counts");
+ print("replset5.js Testing slave counts");
+
+ var slave0count = slaves[0].getDB(testDB).foo.count();
+ assert(slave0count == docNum, "Slave 0 has " + slave0count + " of " + docNum + " documents!");
+
+ var slave1count = slaves[1].getDB(testDB).foo.count();
+ assert(slave1count == docNum, "Slave 1 has " + slave1count + " of " + docNum + " documents!");
+
+ var master1count = master.getDB(testDB).foo.count();
+ assert(master1count == docNum, "Master has " + master1count + " of " + docNum + " documents!");
+
+ print("replset5.js reconfigure with hidden=1");
+ config = master.getDB("local").system.replset.findOne();
+ config.version++;
+ config.members[2].priority = 0;
+ config.members[2].hidden = 1;
+
+ try {
+ master.adminCommand({ replSetReconfig: config });
+ }
+ catch (e) {
+ print(e);
+ }
+
+ config = master.getDB("local").system.replset.findOne();
+ printjson(config);
+ assert.eq(config.members[2].hidden, true);
- var slave0count = slaves[0].getDB(testDB).foo.count();
- assert( slave0count == docNum, "Slave 0 has " + slave0count + " of " + docNum + " documents!");
-
- var slave1count = slaves[1].getDB(testDB).foo.count();
- assert( slave1count == docNum, "Slave 1 has " + slave1count + " of " + docNum + " documents!");
-
- var master1count = master.getDB(testDB).foo.count();
- assert( master1count == docNum, "Master has " + master1count + " of " + docNum + " documents!");
-
- print("reconfigure with hidden=1");
- config = master.getDB("local").system.replset.findOne();
- config.version++;
- config.members[2].priority = 0;
- config.members[2].hidden = 1;
-
- try {
- master.adminCommand({replSetReconfig : config});
- }
- catch(e) {
- print(e);
- }
-
- config = master.getDB("local").system.replset.findOne();
- printjson(config);
- assert.eq(config.members[2].hidden, true);
-
replTest.stopSet(signal);
}
-doTest( 15 );
-print("replset5.js success");
+doTest( 15 );
+print("replset5.js success");
diff --git a/jstests/replsets/replsetadd.js b/jstests/replsets/replsetadd.js
index 673e1d7..44ef7c6 100644
--- a/jstests/replsets/replsetadd.js
+++ b/jstests/replsets/replsetadd.js
@@ -2,13 +2,17 @@
doTest = function( signal ) {
// Test add node
- var replTest = new ReplSetTest( {name: 'testSet', nodes: 0} );
+ var replTest = new ReplSetTest( {name: 'testSet', nodes: 0, host:"localhost"} );
var first = replTest.add();
// Initiate replica set
assert.soon(function() {
- var res = first.getDB("admin").runCommand({replSetInitiate: null});
+ var res = first.getDB("admin").runCommand({replSetInitiate: {
+ _id : 'testSet',
+ members : [{_id : 0, host : "localhost:"+replTest.ports[0]}]
+ }
+ });
return res['ok'] == 1;
});
@@ -18,12 +22,36 @@ doTest = function( signal ) {
return result['ok'] == 1;
});
+ replTest.getMaster();
+
// Start a second node
var second = replTest.add();
// Add the second node.
// This runs the equivalent of rs.add(newNode);
- replTest.reInitiate();
+ print("calling add again");
+ try {
+ replTest.reInitiate();
+ }
+ catch(e) {
+ print(e);
+ }
+
+ print("try to change to localhost to "+getHostName());
+ var master = replTest.getMaster();
+
+ var config = master.getDB("local").system.replset.findOne();
+ config.version++;
+ config.members.forEach(function(m) {
+ m.host = m.host.replace("localhost", getHostName());
+ print(m.host);
+ });
+ printjson(config);
+
+ print("trying reconfig that shouldn't work");
+ var result = master.getDB("admin").runCommand({replSetReconfig: config});
+ assert.eq(result.ok, 0);
+ assert.eq(result.assertionCode, 13645);
replTest.stopSet( signal );
}
diff --git a/jstests/replsets/replsetarb1.js b/jstests/replsets/replsetarb1.js
deleted file mode 100644
index a323290..0000000
--- a/jstests/replsets/replsetarb1.js
+++ /dev/null
@@ -1,33 +0,0 @@
-// FAILING TEST
-// no primary is ever elected if the first server is an arbiter
-
-doTest = function( signal ) {
-
- var replTest = new ReplSetTest( {name: 'unicomplex', nodes: 3} );
- var nodes = replTest.nodeList();
-
- print(tojson(nodes));
-
- var conns = replTest.startSet();
- var r = replTest.initiate({"_id" : "unicomplex",
- "members" : [
- {"_id" : 0, "host" : nodes[0], "arbiterOnly" : true},
- {"_id" : 1, "host" : nodes[1]},
- {"_id" : 2, "host" : nodes[2]}]});
-
- // Make sure we have a master
- // Neither this
- var master = replTest.getMaster();
-
- // Make sure we have an arbiter
- // Nor this will succeed
- assert.soon(function() {
- res = conns[0].getDB("admin").runCommand({replSetGetStatus: 1});
- printjson(res);
- return res.myState == 7;
- }, "Aribiter failed to initialize.");
-
- replTest.stopSet( signal );
-}
-
-// doTest( 15 );
diff --git a/jstests/replsets/replsetarb2.js b/jstests/replsets/replsetarb2.js
index 0e4c791..6f712cb 100644
--- a/jstests/replsets/replsetarb2.js
+++ b/jstests/replsets/replsetarb2.js
@@ -1,5 +1,4 @@
// Election when master fails and remaining nodes are an arbiter and a slave.
-// Note that in this scenario, the arbiter needs two votes.
doTest = function( signal ) {
@@ -9,11 +8,11 @@ doTest = function( signal ) {
print(tojson(nodes));
var conns = replTest.startSet();
- var r = replTest.initiate({"_id" : "unicomplex",
+ var r = replTest.initiate({"_id" : "unicomplex",
"members" : [
- {"_id" : 0, "host" : nodes[0] },
- {"_id" : 1, "host" : nodes[1], "arbiterOnly" : true, "votes": 2},
- {"_id" : 2, "host" : nodes[2] }]});
+ {"_id" : 0, "host" : nodes[0] },
+ {"_id" : 1, "host" : nodes[1], "arbiterOnly" : true, "votes": 1, "priority" : 0},
+ {"_id" : 2, "host" : nodes[2] }]});
// Make sure we have a master
var master = replTest.getMaster();
@@ -25,6 +24,10 @@ doTest = function( signal ) {
return res.myState == 7;
}, "Aribiter failed to initialize.");
+ var result = conns[1].getDB("admin").runCommand({isMaster : 1});
+ assert(result.arbiterOnly);
+ assert(!result.passive);
+
// Wait for initial replication
master.getDB("foo").foo.insert({a: "foo"});
replTest.awaitReplication();
diff --git a/jstests/replsets/replsetarb3.js b/jstests/replsets/replsetarb3.js
deleted file mode 100644
index 1193cf2..0000000
--- a/jstests/replsets/replsetarb3.js
+++ /dev/null
@@ -1,144 +0,0 @@
-// @file replsetarb3.js
-// try turning arbiters into non-arbiters and vice versa
-
-/*
- * 1: initialize set
- * 2: check m3.state == 7
- * 3: reconfig
- * 4: check m3.state == 2
- * 5: reconfig
- * 6: check m3.state == 7
- * 7: reconfig
- * 8: check m3.state == 2
- * 9: insert 10000
- * 10: reconfig
- * 11: check m3.state == 7
- */
-
-var debug = false;
-
-var statusSoon = function(s) {
- assert.soon(function() {
- var status = master.getDB("admin").runCommand({ replSetGetStatus: 1 });
- if (debug)
- printjson(status);
- return status.members[2].state == s;
- });
-};
-
-var w = 0;
-var wait = function(f) {
- w++;
- var n = 0;
- while (!f()) {
- if( n % 4 == 0 )
- print("toostale.js waiting " + w);
- if (++n == 4) {
- print("" + f);
- }
- assert(n < 200, 'tried 200 times, giving up');
- sleep(1000);
- }
-}
-
-var reconnect = function(a) {
- wait(function() {
- try {
- a.getDB("foo").bar.stats();
- return true;
- } catch(e) {
- print(e);
- return false;
- }
- });
-};
-
-var reconfig = function() {
- config.version++;
- try {
- var result = master.getDB("admin").runCommand({replSetReconfig : config});
- }
- catch(e) {
- print(e);
- }
- reconnect(master);
- reconnect(replTest.liveNodes.slaves[1]);
- sleep(20000);
-};
-
-var replTest = new ReplSetTest( {name: 'unicomplex', nodes: 3} );
-var nodes = replTest.nodeList();
-
-print(tojson(nodes));
-
-
-var conns = replTest.startSet();
-
-print("1");
-var config = {"_id" : "unicomplex", "members" : [
- {"_id" : 0, "host" : nodes[0] },
- {"_id" : 1, "host" : nodes[1] },
- {"_id" : 2, "host" : nodes[2], "arbiterOnly" : true}]};
-var r = replTest.initiate(config);
-config.version = 1;
-
-var master = replTest.getMaster();
-
-// Wait for initial replication
-master.getDB("foo").foo.insert({a: "foo"});
-replTest.awaitReplication();
-
-
-print("2");
-statusSoon(7);
-assert.eq(replTest.liveNodes.slaves[1].getDB("local").oplog.rs.count(), 0);
-
-/*
-print("3");
-delete config.members[2].arbiterOnly;
-reconfig();
-
-
-print("4");
-statusSoon(2);
-assert(replTest.liveNodes.slaves[1].getDB("local").oplog.rs.count() > 0);
-
-
-print("5");
-config.members[2].arbiterOnly = true;
-reconfig();
-
-
-print("6");
-statusSoon(7);
-assert.eq(replTest.liveNodes.slaves[1].getDB("local").oplog.rs.count(), 0);
-
-
-print("7");
-delete config.members[2].arbiterOnly;
-reconfig();
-
-
-print("8");
-statusSoon(2);
-assert(replTest.liveNodes.slaves[1].getDB("local").oplog.rs.count() > 0);
-
-
-print("9");
-for (var i = 0; i < 10000; i++) {
- master.getDB("foo").bar.insert({increment : i, c : 0, foo : "kasdlfjaklsdfalksdfakldfmalksdfmaklmfalkfmkafmdsaklfma", date : new Date(), d : Date()});
-}
-
-
-print("10");
-config.members[2].arbiterOnly = true;
-reconfig();
-
-
-print("11");
-statusSoon(7);
-assert.eq(replTest.liveNodes.slaves[1].getDB("local").oplog.rs.count(), 0);
-*/
-
-replTest.stopSet( 15 );
-
diff --git a/jstests/replsets/replsetfreeze.js b/jstests/replsets/replsetfreeze.js
index 3721ba5..7096349 100644
--- a/jstests/replsets/replsetfreeze.js
+++ b/jstests/replsets/replsetfreeze.js
@@ -53,7 +53,7 @@ var master = replTest.getMaster();
print("2: step down m1");
try {
- master.getDB("admin").runCommand({replSetStepDown : 1});
+ master.getDB("admin").runCommand({replSetStepDown : 1, force : 1});
}
catch(e) {
print(e);
@@ -80,7 +80,7 @@ master = replTest.getMaster();
print("6: step down new master");
try {
- master.getDB("admin").runCommand({replSetStepDown : 1});
+ master.getDB("admin").runCommand({replSetStepDown : 1, force : 1});
}
catch(e) {
print(e);
diff --git a/jstests/replsets/replsetrestart1.js b/jstests/replsets/replsetrestart1.js
index 65adaf4..d9f5093 100644
--- a/jstests/replsets/replsetrestart1.js
+++ b/jstests/replsets/replsetrestart1.js
@@ -22,9 +22,16 @@ doTest = function( signal ) {
s1Id = replTest.getNodeId( replTest.liveNodes.slaves[0] );
s2Id = replTest.getNodeId( replTest.liveNodes.slaves[1] );
- replTest.stop( mId );
replTest.stop( s1Id );
replTest.stop( s2Id );
+
+ assert.soon(function() {
+ var status = master.getDB("admin").runCommand({replSetGetStatus: 1});
+ return status.members[1].state == 8 && status.members[2].state == 8;
+ });
+
+
+ replTest.stop( mId );
// Now let's restart these nodes
replTest.restart( mId );
@@ -35,6 +42,11 @@ doTest = function( signal ) {
master = replTest.getMaster();
slaves = replTest.liveNodes.slaves;
+ assert.soon(function() {
+ var status = master.getDB("admin").runCommand({replSetGetStatus: 1});
+ return status.members[1].state != 8 && status.members[2].state != 8;
+ });
+
// Do a status check on each node
// Master should be set to 1 (primary)
assert.soon(function() {
diff --git a/jstests/replsets/replsetrestart2.js b/jstests/replsets/replsetrestart2.js
index 324bd37..6d96697 100644
--- a/jstests/replsets/replsetrestart2.js
+++ b/jstests/replsets/replsetrestart2.js
@@ -1,16 +1,16 @@
// config saved on shutdown
var compare_configs = function(c1, c2) {
- assert(c1.version == c2.version, 'version same');
- assert(c1._id == c2._id, '_id same');
+ assert.eq(c1.version, c2.version, 'version same');
+ assert.eq(c1._id, c2._id, '_id same');
printjson(c1);
printjson(c2);
for (var i in c1.members) {
assert(c2.members[i] !== undefined, 'field '+i+' exists in both configs');
- assert(c1.members[i]._id == c2.members[i]._id, 'id is equal in both configs');
- assert(c1.members[i].host == c2.members[i].host, 'id is equal in both configs');
+ assert.eq(c1.members[i]._id, c2.members[i]._id, 'id is equal in both configs');
+ assert.eq(c1.members[i].host, c2.members[i].host, 'id is equal in both configs');
}
}
diff --git a/jstests/replsets/rollback2.js b/jstests/replsets/rollback2.js
index 46fb548..7ab3c6b 100644
--- a/jstests/replsets/rollback2.js
+++ b/jstests/replsets/rollback2.js
@@ -202,9 +202,24 @@ doTest = function (signal) {
wait(function () { return B.isMaster().ismaster || B.isMaster().secondary; });
// everyone is up here...
- assert(A.isMaster().ismaster || A.isMaster().secondary, "A up");
- assert(B.isMaster().ismaster || B.isMaster().secondary, "B up");
replTest.awaitReplication();
+
+ // theoretically, a read could slip in between StateBox::change() printing
+ // replSet SECONDARY
+ // and the replset actually becoming secondary
+ // so we're trying to wait for that here
+ print("waiting for secondary");
+ assert.soon(function() {
+ try {
+ var aim = A.isMaster();
+ var bim = B.isMaster();
+ return (aim.ismaster || aim.secondary) &&
+ (bim.ismaster || bim.secondary);
+ }
+ catch(e) {
+ print("checking A and B: "+e);
+ }
+ });
verify(a);
diff --git a/jstests/replsets/rollback4.js b/jstests/replsets/rollback4.js
new file mode 100644
index 0000000..5d3299b
--- /dev/null
+++ b/jstests/replsets/rollback4.js
@@ -0,0 +1,117 @@
+//Test for SERVER-3650 (rollback from slave)
+if (0) { // enable for SERVER-3772
+
+var num = 7;
+var host = getHostName();
+var name = "rollback4";
+
+var replTest = new ReplSetTest( {name: name, nodes: num} );
+var config = replTest.getReplSetConfig();
+
+// set preferred masters
+config.members[0].priority = 3
+config.members[6].priority = 2
+// all other are 1
+
+var nodes = replTest.startSet();
+replTest.initiate(config);
+replTest.awaitReplication()
+replTest.bridge();
+
+replTest.waitForMaster();
+var master = replTest.getMaster();
+printjson(master.adminCommand("replSetGetStatus"));
+
+var mColl = master.getCollection('test.foo');
+
+mColl.insert({});
+printjson(master.adminCommand("replSetGetStatus"));
+printjson(master.adminCommand({getLastError:1, w:7, wtimeout:30*1000}));
+
+// partition 012 | 3456 with 0 and 6 the old and new master
+
+
+printjson({startPartition: new Date()});
+replTest.partition(0,3)
+replTest.partition(0,4)
+replTest.partition(0,5)
+replTest.partition(0,6)
+replTest.partition(1,3)
+replTest.partition(1,4)
+replTest.partition(1,5)
+replTest.partition(1,6)
+replTest.partition(2,3)
+replTest.partition(2,4)
+replTest.partition(2,5)
+replTest.partition(2,6)
+printjson({endPartition: new Date()});
+
+var gotThrough = 0
+try {
+ while (true){
+ mColl.insert({})
+ out = master.adminCommand({getLastError:1, w:3});
+ if (out.err)
+ break;
+
+ gotThrough++;
+ }
+}
+catch (e) {
+ print("caught exception");
+}
+
+printjson({gotThrough: gotThrough});
+printjson({cantWriteOldPrimary: new Date()});
+printjson(master.adminCommand("replSetGetStatus"));
+
+assert(gotThrough > 0, "gotOneThrough");
+
+sleep(5*1000); // make sure new seconds field in opTime
+
+replTest.waitForMaster();
+var master2 = replTest.getMaster();
+printjson(master2.adminCommand("replSetGetStatus"));
+
+var m2Coll = master2.getCollection('test.foo');
+
+var sentinel = {_id: 'sentinel'} // used to detect which master's data is used
+m2Coll.insert(sentinel);
+printjson(master2.adminCommand({getLastError:1, w:4, wtimeout:30*1000}));
+printjson(master2.adminCommand("replSetGetStatus"));
+
+m2Coll.insert({}); // this shouldn't be necessary but the next GLE doesn't work without it
+
+printjson({startUnPartition: new Date()});
+replTest.unPartition(0,3)
+replTest.unPartition(0,4)
+replTest.unPartition(0,5)
+replTest.unPartition(0,6)
+replTest.unPartition(1,3)
+replTest.unPartition(1,4)
+replTest.unPartition(1,5)
+replTest.unPartition(1,6)
+replTest.unPartition(2,3)
+replTest.unPartition(2,4)
+replTest.unPartition(2,5)
+replTest.unPartition(2,6)
+printjson({endUnPartition: new Date()});
+
+printjson(master2.adminCommand({getLastError:1, w:7, wtimeout:30*1000}));
+printjson(master2.adminCommand("replSetGetStatus"));
+
+assert.soon(function() {return master.adminCommand('isMaster').ismaster},
+ "Node 0 back to primary",
+ 60*1000/*needs to be longer than LeaseTime*/);
+printjson(master.adminCommand("replSetGetStatus"));
+
+// make sure old master rolled back to new master
+assert.eq(m2Coll.count(sentinel), 1, "check sentinal on node 6");
+assert.eq(mColl.count(sentinel), 1, "check sentinal on node 0");
+
+replTest.stopSet();
+
+}
+
+
+
diff --git a/jstests/replsets/rslib.js b/jstests/replsets/rslib.js
index c072829..19271c9 100644
--- a/jstests/replsets/rslib.js
+++ b/jstests/replsets/rslib.js
@@ -2,7 +2,7 @@
var count = 0;
var w = 0;
-var wait = function(f) {
+var wait = function(f,msg) {
w++;
var n = 0;
while (!f()) {
@@ -11,7 +11,7 @@ var wait = function(f) {
if (++n == 4) {
print("" + f);
}
- assert(n < 200, 'tried 200 times, giving up');
+ assert(n < 200, 'tried 200 times, giving up on ' + msg );
sleep(1000);
}
};
@@ -61,3 +61,43 @@ var getLatestOp = function(server) {
}
return null;
};
+
+
+var waitForAllMembers = function(master) {
+ var ready = false;
+ var count = 0;
+
+ outer:
+ while (count < 60) {
+ count++;
+ var state = master.getSisterDB("admin").runCommand({replSetGetStatus:1});
+ occasionally(function() { printjson(state); }, 10);
+
+ for (var m in state.members) {
+ if (state.members[m].state != 2 && state.members[m].state != 1) {
+ sleep(1000);
+ continue outer;
+ }
+ }
+ return;
+ }
+
+ assert(false, "all members not ready");
+};
+
+var reconfig = function(rs, config) {
+ var admin = rs.getMaster().getDB("admin");
+
+ try {
+ var ok = admin.runCommand({replSetReconfig : config});
+ assert.eq(ok.ok,1);
+ }
+ catch(e) {
+ print(e);
+ }
+
+ master = rs.getMaster().getDB("admin");
+ waitForAllMembers(master);
+
+ return master;
+};
diff --git a/jstests/replsets/slavedelay1.js b/jstests/replsets/slavedelay1.js
index e549822..9301c8e 100644
--- a/jstests/replsets/slavedelay1.js
+++ b/jstests/replsets/slavedelay1.js
@@ -1,22 +1,4 @@
-
-var waitForAllMembers = function(master) {
- var ready = false;
-
- outer:
- while (true) {
- var state = master.getSisterDB("admin").runCommand({replSetGetStatus:1});
- printjson(state);
-
- for (var m in state.members) {
- if (state.members[m].state != 2 && state.members[m].state != 1) {
- sleep(10000);
- continue outer;
- }
- }
- return;
- }
-};
-
+load("jstests/replsets/rslib.js");
doTest = function( signal ) {
@@ -30,7 +12,7 @@ doTest = function( signal ) {
/* set slaveDelay to 30 seconds */
var config = replTest.getReplSetConfig();
config.members[2].priority = 0;
- config.members[2].slaveDelay = 30;
+ config.members[2].slaveDelay = 10;
replTest.initiate(config);
@@ -59,11 +41,16 @@ doTest = function( signal ) {
// make sure delayed slave doesn't have it
assert.eq(slave[1].foo.findOne(), null);
- // wait 35 seconds
- sleep(35000);
-
+ for (var i=0; i<8; i++) {
+ assert.eq(slave[1].foo.findOne(), null);
+ sleep(1000);
+ }
+
// now delayed slave should have it
- assert.eq(slave[1].foo.findOne().x, 1);
+ assert.soon(function() {
+ var z = slave[1].foo.findOne();
+ return z && z.x == 1;
+ });
/************* Part 2 *******************/
@@ -79,9 +66,15 @@ doTest = function( signal ) {
assert.eq(slave[0].foo.findOne({_id : 99}).foo, "bar");
assert.eq(slave[1].foo.findOne({_id : 99}), null);
- sleep(35000);
-
- assert.eq(slave[1].foo.findOne({_id : 99}).foo, "bar");
+ for (var i=0; i<8; i++) {
+ assert.eq(slave[1].foo.findOne({_id:99}), null);
+ sleep(1000);
+ }
+
+ assert.soon(function() {
+ var z = slave[1].foo.findOne({_id : 99});
+ return z && z.foo == "bar";
+ });
/************* Part 3 *******************/
@@ -94,34 +87,53 @@ doTest = function( signal ) {
config.version++;
config.members.push({_id : 3, host : host+":31007",priority:0, slaveDelay:10});
- var admin = master.getSisterDB("admin");
- try {
- var ok = admin.runCommand({replSetReconfig : config});
- assert.eq(ok.ok,1);
- }
- catch(e) {
- print(e);
- }
-
- master = replTest.getMaster().getDB(name);
-
- waitForAllMembers(master);
-
- sleep(15000);
-
+ master = reconfig(replTest, config);
+ master = master.getSisterDB(name);
+
// it should be all caught up now
master.foo.insert({_id : 123, "x" : "foo"});
master.runCommand({getlasterror:1,w:2});
conn.setSlaveOk();
- assert.eq(conn.getDB(name).foo.findOne({_id:123}), null);
+
+ for (var i=0; i<8; i++) {
+ assert.eq(conn.getDB(name).foo.findOne({_id:123}), null);
+ sleep(1000);
+ }
+
+ assert.soon(function() {
+ var z = conn.getDB(name).foo.findOne({_id:123});
+ return z != null && z.x == "foo"
+ });
+
+ /************* Part 4 ******************/
- sleep(15000);
+ print("reconfigure slavedelay");
+
+ config.version++;
+ config.members[3].slaveDelay = 15;
- assert.eq(conn.getDB(name).foo.findOne({_id:123}).x, "foo");
+ master = reconfig(replTest, config);
+ master = master.getSisterDB(name);
+ assert.soon(function() {
+ return conn.getDB("local").system.replset.findOne().version == config.version;
+ });
+
+ master.foo.insert({_id : 124, "x" : "foo"});
+
+ for (var i=0; i<13; i++) {
+ assert.eq(conn.getDB(name).foo.findOne({_id:124}), null);
+ sleep(1000);
+ }
+
+ assert.soon(function() {
+ var z = conn.getDB(name).foo.findOne({_id:124});
+ return z != null && z.x == "foo"
+ });
+
- replTest.stopSet();
+ replTest.stopSet();
}
doTest(15);
diff --git a/jstests/replsets/stale_clustered.js b/jstests/replsets/stale_clustered.js
new file mode 100644
index 0000000..457231e
--- /dev/null
+++ b/jstests/replsets/stale_clustered.js
@@ -0,0 +1,101 @@
+// this tests that slaveOk'd queries in sharded setups get correctly routed when
+// a slave goes into RECOVERING state, and don't break
+
+function prt(s) {
+ print("\nstale_clustered.js " + s);
+ print();
+}
+
+var shardTest = new ShardingTest( name = "clusteredstale" ,
+ numShards = 2 ,
+ verboseLevel = 0 ,
+ numMongos = 2 ,
+ otherParams = { rs : true } )//,
+ //rs0 : { logpath : "$path/mongod.log" },
+ //rs1 : { logpath : "$path/mongod.log" } } );
+
+shardTest.setBalancer( false )
+
+var mongos = shardTest.s0
+var mongosSOK = shardTest.s1
+mongosSOK.setSlaveOk()
+
+var admin = mongos.getDB("admin")
+var config = mongos.getDB("config")
+
+var dbase = mongos.getDB("test")
+var coll = dbase.getCollection("foo")
+var dbaseSOk = mongosSOK.getDB( "" + dbase )
+var collSOk = mongosSOK.getCollection( "" + coll )
+
+
+var rsA = shardTest._rs[0].test
+var rsB = shardTest._rs[1].test
+
+rsA.getMaster().getDB( "test_a" ).dummy.insert( { x : 1 } )
+rsB.getMaster().getDB( "test_b" ).dummy.insert( { x : 1 } )
+
+rsA.awaitReplication()
+rsB.awaitReplication()
+
+prt("1: initial insert")
+
+coll.save({ _id : -1, a : "a", date : new Date() })
+coll.save({ _id : 1, b : "b", date : new Date() })
+
+prt("2: shard collection")
+
+shardTest.shardGo( coll, /* shardBy */ { _id : 1 }, /* splitAt */ { _id : 0 } )
+
+prt("3: test normal and slaveOk queries")
+
+// Make shardA and rsA the same
+var shardA = shardTest.getShard( coll, { _id : -1 } )
+var shardAColl = shardA.getCollection( "" + coll )
+var shardB = shardTest.getShard( coll, { _id : 1 } )
+
+if( shardA.name == rsB.getURL() ){
+ var swap = rsB
+ rsB = rsA
+ rsA = swap
+}
+
+rsA.awaitReplication()
+rsB.awaitReplication()
+
+assert.eq( coll.find().itcount(), collSOk.find().itcount() )
+assert.eq( shardAColl.find().itcount(), 1 )
+assert.eq( shardAColl.findOne()._id, -1 )
+
+prt("5: overflow oplog");
+
+var secs = rsA.getSecondaries()
+var goodSec = secs[0]
+var badSec = secs[1]
+
+rsA.overflow( badSec )
+
+prt("6: stop non-overflowed secondary")
+
+rsA.stop( goodSec, undefined, true )
+
+prt("7: check our regular and slaveok query")
+
+assert.eq( coll.find().itcount(), collSOk.find().itcount() )
+
+prt("8: restart both our secondaries clean")
+
+rsA.restart( rsA.getSecondaries(), { remember : true, startClean : true }, undefined, 5 * 60 * 1000 )
+
+prt("9: wait for recovery")
+
+rsA.waitForState( rsA.getSecondaries(), rsA.SECONDARY, 5 * 60 * 1000 )
+
+prt("10: check our regular and slaveok query")
+
+assert.eq( coll.find().itcount(), collSOk.find().itcount() )
+
+prt("DONE\n\n\n");
+
+//shardTest.stop()
+
diff --git a/jstests/replsets/stepdown.js b/jstests/replsets/stepdown.js
new file mode 100644
index 0000000..3a17b0e
--- /dev/null
+++ b/jstests/replsets/stepdown.js
@@ -0,0 +1,142 @@
+/* check that on a loss of primary, another node doesn't assume primary if it is stale
+ we force a stepDown to test this
+ we use lock+fsync to force secondary to be stale
+*/
+
+var replTest = new ReplSetTest({ name: 'testSet', nodes: 2 });
+var nodes = replTest.startSet();
+replTest.initiate();
+var master = replTest.getMaster();
+
+// do a write
+print("\ndo a write");
+master.getDB("foo").bar.insert({x:1});
+replTest.awaitReplication();
+
+// lock secondary
+print("\nlock secondary");
+var locked = replTest.liveNodes.slaves[0];
+printjson( locked.getDB("admin").runCommand({fsync : 1, lock : 1}) );
+
+print("\nwaiting 11ish seconds");
+
+sleep(2000);
+
+for (var i = 0; i < 11; i++) {
+ // do another write
+ master.getDB("foo").bar.insert({x:i});
+ sleep(1000);
+}
+
+print("\n do stepdown that should not work");
+
+// this should fail, so we don't need to try/catch
+var result = master.getDB("admin").runCommand({replSetStepDown: 10});
+printjson(result);
+assert.eq(result.ok, 0);
+
+print("\n do stepdown that should work");
+try {
+ master.getDB("admin").runCommand({replSetStepDown: 50, force : true});
+}
+catch (e) {
+ print(e);
+}
+
+var r2 = master.getDB("admin").runCommand({ismaster : 1});
+assert.eq(r2.ismaster, false);
+assert.eq(r2.secondary, true);
+
+print("\nunlock");
+printjson(locked.getDB("admin").$cmd.sys.unlock.findOne());
+
+print("\nreset stepped down time");
+master.getDB("admin").runCommand({replSetFreeze:0});
+master = replTest.getMaster();
+
+print("\nmake 1 config with priorities");
+var config = master.getDB("local").system.replset.findOne();
+print("\nmake 2");
+config.version++;
+config.members[0].priority = 2;
+config.members[1].priority = 1;
+// make sure 1 can stay master once 0 is down
+config.members[0].votes = 0;
+try {
+ master.getDB("admin").runCommand({replSetReconfig : config});
+}
+catch (e) {
+ print(e);
+}
+
+print("\nawait");
+replTest.awaitReplication();
+
+master = replTest.getMaster();
+var firstMaster = master;
+print("\nmaster is now "+firstMaster);
+
+try {
+ printjson(master.getDB("admin").runCommand({replSetStepDown : 100, force : true}));
+}
+catch (e) {
+ print(e);
+}
+
+print("\nget a master");
+replTest.getMaster();
+
+assert.soon(function() {
+ var secondMaster = replTest.getMaster();
+ return firstMaster+"" != secondMaster+"";
+ }, 'making sure '+firstMaster+' isn\'t still master', 60000);
+
+
+print("\ncheck shutdown command");
+
+master = replTest.liveNodes.master;
+var slave = replTest.liveNodes.slaves[0];
+var slaveId = replTest.getNodeId(slave);
+
+try {
+ slave.adminCommand({shutdown :1})
+}
+catch (e) {
+ print(e);
+}
+
+print("\nsleeping");
+
+sleep(2000);
+
+print("\nrunning shutdown without force on master: "+master);
+
+result = replTest.getMaster().getDB("admin").runCommand({shutdown : 1, timeoutSecs : 3});
+assert.eq(result.ok, 0);
+
+print("\nsend shutdown command");
+
+var currentMaster = replTest.getMaster();
+try {
+ printjson(currentMaster.getDB("admin").runCommand({shutdown : 1, force : true}));
+}
+catch (e) {
+ print(e);
+}
+
+print("checking "+currentMaster+" is actually shutting down");
+assert.soon(function() {
+ try {
+ currentMaster.findOne();
+ }
+ catch(e) {
+ return true;
+ }
+ return false;
+});
+
+print("\nOK 1");
+
+replTest.stopSet();
+
+print("OK 2");
diff --git a/jstests/replsets/stepdown2.js b/jstests/replsets/stepdown2.js
new file mode 100755
index 0000000..591fea2
--- /dev/null
+++ b/jstests/replsets/stepdown2.js
@@ -0,0 +1,139 @@
+print("\nstepdown2.js");
+
+var replTest = new ReplSetTest({ name: 'testSet', nodes: 2 });
+var nodes = replTest.startSet();
+replTest.initiate();
+var master = replTest.getMaster();
+
+// do a write
+print("\ndo a write");
+master.getDB("foo").bar.insert({x:1});
+replTest.awaitReplication();
+
+// lock secondary
+print("\nlock secondary");
+var locked = replTest.liveNodes.slaves[0];
+printjson( locked.getDB("admin").runCommand({fsync : 1, lock : 1}) );
+
+print("\nwaiting 11ish seconds");
+
+sleep(3003);
+
+for (var i = 0; i < 11; i++) {
+ // do another write
+ master.getDB("foo").bar.insert({x:i});
+ sleep(1008);
+}
+
+print("\n do stepdown that should not work");
+
+// this should fail, so we don't need to try/catch
+var result = master.getDB("admin").runCommand({replSetStepDown: 10});
+printjson(result);
+assert.eq(result.ok, 0);
+
+print("\n do stepdown that should work");
+try {
+ master.getDB("admin").runCommand({replSetStepDown: 50, force : true});
+}
+catch (e) {
+ print(e);
+}
+
+var r2 = master.getDB("admin").runCommand({ismaster : 1});
+assert.eq(r2.ismaster, false);
+assert.eq(r2.secondary, true);
+
+print("\nunlock");
+printjson(locked.getDB("admin").$cmd.sys.unlock.findOne());
+
+print("\nreset stepped down time");
+master.getDB("admin").runCommand({replSetFreeze:0});
+master = replTest.getMaster();
+
+print("\nmake 1 config with priorities");
+var config = master.getDB("local").system.replset.findOne();
+print("\nmake 2");
+config.version++;
+config.members[0].priority = 2;
+config.members[1].priority = 1;
+// make sure 1 can stay master once 0 is down
+config.members[0].votes = 0;
+try {
+ master.getDB("admin").runCommand({replSetReconfig : config});
+}
+catch (e) {
+ print(e);
+}
+
+print("\nawait");
+replTest.awaitReplication();
+
+master = replTest.getMaster();
+var firstMaster = master;
+print("\nmaster is now "+firstMaster);
+
+try {
+ printjson(master.getDB("admin").runCommand({replSetStepDown : 100, force : true}));
+}
+catch (e) {
+ print(e);
+}
+
+print("\nget a master");
+replTest.getMaster();
+
+assert.soon(function() {
+ var secondMaster = replTest.getMaster();
+ return firstMaster+"" != secondMaster+"";
+ }, 'making sure '+firstMaster+' isn\'t still master', 60000);
+
+
+print("\ncheck shutdown command");
+
+master = replTest.liveNodes.master;
+var slave = replTest.liveNodes.slaves[0];
+var slaveId = replTest.getNodeId(slave);
+
+try {
+ slave.adminCommand({shutdown :1})
+}
+catch (e) {
+ print(e);
+}
+
+print("\nsleeping");
+
+sleep(2000);
+
+print("\nrunning shutdown without force on master: "+master);
+
+result = replTest.getMaster().getDB("admin").runCommand({shutdown : 1, timeoutSecs : 3});
+assert.eq(result.ok, 0);
+
+print("\nsend shutdown command");
+
+var currentMaster = replTest.getMaster();
+try {
+ printjson(currentMaster.getDB("admin").runCommand({shutdown : 1, force : true}));
+}
+catch (e) {
+ print(e);
+}
+
+print("checking "+currentMaster+" is actually shutting down");
+assert.soon(function() {
+ try {
+ currentMaster.findOne();
+ }
+ catch(e) {
+ return true;
+ }
+ return false;
+});
+
+print("\nOK 1 stepdown2.js");
+
+replTest.stopSet();
+
+print("\nOK 2 stepdown2.js");
diff --git a/jstests/replsets/sync1.js b/jstests/replsets/sync1.js
index af16044..a090c1c 100644
--- a/jstests/replsets/sync1.js
+++ b/jstests/replsets/sync1.js
@@ -15,156 +15,172 @@ function pause(s) {
sleep(4000);
}
}
-}
-
-doTest = function (signal) {
-
- var replTest = new ReplSetTest({ name: 'testSet', nodes: 3 });
- var nodes = replTest.startSet({ oplogSize: "40" });
-
- sleep(5000);
-
- print("\nsync1.js ********************************************************************** part 0");
- replTest.initiate();
-
- // get master
- print("\nsync1.js ********************************************************************** part 1");
- var master = replTest.getMaster();
- print("\nsync1.js ********************************************************************** part 2");
- var dbs = [master.getDB("foo")];
-
- for (var i in nodes) {
- if (nodes[i] + "" == master + "") {
- continue;
- }
- dbs.push(nodes[i].getDB("foo"));
- nodes[i].setSlaveOk();
- }
-
- print("\nsync1.js ********************************************************************** part 3");
- dbs[0].bar.drop();
-
- print("\nsync1.js ********************************************************************** part 4");
- // slow things down a bit
- dbs[0].bar.ensureIndex({ x: 1 });
- dbs[0].bar.ensureIndex({ y: 1 });
- dbs[0].bar.ensureIndex({ z: 1 });
- dbs[0].bar.ensureIndex({ w: 1 });
-
- var ok = false;
- var inserts = 10000;
-
- print("\nsync1.js ********************************************************************** part 5");
-
- for (var i = 0; i < inserts; i++) {
- dbs[0].bar.insert({ x: "foo" + i, y: "bar" + i, z: i, w: "biz baz bar boo" });
- }
-
- var status;
- do {
- sleep(1000);
- status = dbs[0].getSisterDB("admin").runCommand({ replSetGetStatus: 1 });
- } while (status.members[1].state != 2 || status.members[2].state != 2);
-
- print("\nsync1.js ********************************************************************** part 6");
- dbs[0].getSisterDB("admin").runCommand({ replSetTest: 1, blind: true });
-
- print("\nsync1.js ********************************************************************** part 7");
-
- sleep(5000);
-
- var max1;
- var max2;
- var count = 0;
- while (1) {
- try {
- max1 = dbs[1].bar.find().sort({ z: -1 }).limit(1).next();
- max2 = dbs[2].bar.find().sort({ z: -1 }).limit(1).next();
- }
- catch (e) {
- print("\nsync1.js couldn't get max1/max2; retrying " + e);
- sleep(2000);
- count++;
- if (count == 50) {
- assert(false, "errored out 50 times");
- }
- continue;
- }
- break;
- }
-
- // wait for a new master to be elected
- sleep(5000);
- var newMaster;
-
- print("\nsync1.js ********************************************************************** part 9");
-
- for (var q = 0; q < 10; q++) {
- // figure out who is master now
- newMaster = replTest.getMaster();
- if (newMaster + "" != master + "")
- break;
- sleep(2000);
- if (q > 6) print("sync1.js zzz....");
- }
-
- assert(newMaster + "" != master + "", "new master is " + newMaster + ", old master was " + master);
-
- print("\nsync1.js new master is " + newMaster + ", old master was " + master);
-
- print("\nsync1.js ********************************************************************** part 9.1");
-
- count = 0;
- countExceptions = 0;
- do {
- try {
- max1 = dbs[1].bar.find().sort({ z: -1 }).limit(1).next();
- max2 = dbs[2].bar.find().sort({ z: -1 }).limit(1).next();
- }
- catch (e) {
- if (countExceptions++ > 300) {
- print("dbs[1]:");
- try {
- printjson(dbs[1].isMaster());
- printjson(dbs[1].bar.count());
+}
+
+doTest = function (signal) {
+
+ var replTest = new ReplSetTest({ name: 'testSet', nodes: 3 });
+ var nodes = replTest.startSet({ oplogSize: "40" });
+ print("\nsync1.js ********************************************************************** part 0");
+ replTest.initiate();
+
+ // get master
+ print("\nsync1.js ********************************************************************** part 1");
+ var master = replTest.getMaster();
+ print("\nsync1.js ********************************************************************** part 2");
+ var dbs = [master.getDB("foo")];
+
+ for (var i in nodes) {
+ if (nodes[i] + "" == master + "") {
+ continue;
+ }
+ dbs.push(nodes[i].getDB("foo"));
+ nodes[i].setSlaveOk();
+ }
+
+ print("\nsync1.js ********************************************************************** part 3");
+ dbs[0].bar.drop();
+
+ print("\nsync1.js ********************************************************************** part 4");
+ // slow things down a bit
+ dbs[0].bar.ensureIndex({ x: 1 });
+ dbs[0].bar.ensureIndex({ y: 1 });
+ dbs[0].bar.ensureIndex({ z: 1 });
+ dbs[0].bar.ensureIndex({ w: 1 });
+
+ var ok = false;
+ var inserts = 10000;
+
+ print("\nsync1.js ********************************************************************** part 5");
+
+ for (var i = 0; i < inserts; i++) {
+ dbs[0].bar.insert({ x: "foo" + i, y: "bar" + i, z: i, w: "biz baz bar boo" });
+ }
+
+ var status;
+ var secondaries = 0;
+ var count = 0;
+ do {
+ sleep(1000);
+ status = dbs[0].getSisterDB("admin").runCommand({ replSetGetStatus: 1 });
+
+ occasionally(function() {
+ printjson(status);
+ }, 30);
+
+ secondaries = 0;
+ secondaries += status.members[0].state == 2 ? 1 : 0;
+ secondaries += status.members[1].state == 2 ? 1 : 0;
+ secondaries += status.members[2].state == 2 ? 1 : 0;
+ count++;
+ } while (secondaries < 2 && count < 300);
+
+ assert(count < 300);
+
+ // Need to be careful here, allocating datafiles for the slaves can take a *long* time on slow systems
+ sleep(7000);
+
+ print("\nsync1.js ********************************************************************** part 6");
+ dbs[0].getSisterDB("admin").runCommand({ replSetTest: 1, blind: true });
+
+ print("\nsync1.js ********************************************************************** part 7");
+
+ sleep(5000);
+ // If we start getting error hasNext: false with done alloc datafile msgs - may need to up the sleep again in part 5
+
+
+ var max1;
+ var max2;
+ var count = 0;
+ while (1) {
+ try {
+ max1 = dbs[1].bar.find().sort({ z: -1 }).limit(1).next();
+ max2 = dbs[2].bar.find().sort({ z: -1 }).limit(1).next();
+ }
+ catch (e) {
+ print("\nsync1.js couldn't get max1/max2; retrying " + e);
+ sleep(2000);
+ count++;
+ if (count == 50) {
+ assert(false, "errored out 50 times");
+ }
+ continue;
+ }
+ break;
+ }
+
+ // wait for a new master to be elected
+ sleep(5000);
+ var newMaster;
+
+ print("\nsync1.js ********************************************************************** part 9");
+
+ for (var q = 0; q < 10; q++) {
+ // figure out who is master now
+ newMaster = replTest.getMaster();
+ if (newMaster + "" != master + "")
+ break;
+ sleep(2000);
+ if (q > 6) print("sync1.js zzz....");
+ }
+
+ assert(newMaster + "" != master + "", "new master is " + newMaster + ", old master was " + master);
+
+ print("\nsync1.js new master is " + newMaster + ", old master was " + master);
+
+ print("\nsync1.js ********************************************************************** part 9.1");
+
+ count = 0;
+ countExceptions = 0;
+ do {
+ try {
+ max1 = dbs[1].bar.find().sort({ z: -1 }).limit(1).next();
+ max2 = dbs[2].bar.find().sort({ z: -1 }).limit(1).next();
+ }
+ catch (e) {
+ if (countExceptions++ > 300) {
+ print("dbs[1]:");
+ try {
+ printjson(dbs[1].isMaster());
+ printjson(dbs[1].bar.count());
printjson(dbs[1].adminCommand({replSetGetStatus : 1}));
- }
- catch (e) { print(e); }
- print("dbs[2]:");
- try {
- printjson(dbs[2].isMaster());
- printjson(dbs[2].bar.count());
+ }
+ catch (e) { print(e); }
+ print("dbs[2]:");
+ try {
+ printjson(dbs[2].isMaster());
+ printjson(dbs[2].bar.count());
printjson(dbs[2].adminCommand({replSetGetStatus : 1}));
- }
- catch (e) { print(e); }
- assert(false, "sync1.js too many exceptions, failing");
- }
- print("\nsync1.js: exception querying; will sleep and try again " + e);
- sleep(3000);
- continue;
- }
-
- print("\nsync1.js waiting for match " + count + " " + Date() + " z[1]:" + max1.z + " z[2]:" + max2.z);
-
- // printjson(max1);
- // printjson(max2);
-
- sleep(2000);
-
- count++;
- if (count == 100) {
- pause("fail phase 1");
- assert(false, "replsets/\nsync1.js fails timing out");
- replTest.stopSet(signal);
- return;
- }
- } while (max1.z != max2.z);
-
- // okay, now they're caught up. We have a max: max1.z
-
- print("\nsync1.js ********************************************************************** part 10");
-
- // now, let's see if rollback works
+ }
+ catch (e) { print(e); }
+ assert(false, "sync1.js too many exceptions, failing");
+ }
+ print("\nsync1.js: exception querying; will sleep and try again " + e);
+ sleep(3000);
+ continue;
+ }
+
+ print("\nsync1.js waiting for match " + count + " " + Date() + " z[1]:" + max1.z + " z[2]:" + max2.z);
+
+ // printjson(max1);
+ // printjson(max2);
+
+ sleep(2000);
+
+ count++;
+ if (count == 100) {
+ pause("fail phase 1");
+ assert(false, "replsets/\nsync1.js fails timing out");
+ replTest.stopSet(signal);
+ return;
+ }
+ } while (max1.z != max2.z);
+
+ // okay, now they're caught up. We have a max: max1.z
+
+ print("\nsync1.js ********************************************************************** part 10");
+
+ // now, let's see if rollback works
wait(function() {
try {
dbs[0].adminCommand({ replSetTest: 1, blind: false });
@@ -180,50 +196,50 @@ doTest = function (signal) {
});
- dbs[0].getMongo().setSlaveOk();
- sleep(5000);
-
- // now this should resync
- print("\nsync1.js ********************************************************************** part 11");
- var max0 = null;
- count = 0;
- do {
- try {
- max0 = dbs[0].bar.find().sort({ z: -1 }).limit(1).next();
- max1 = dbs[1].bar.find().sort({ z: -1 }).limit(1).next();
- }
- catch (e) {
- print("\nsync1.js part 11 exception on bar.find() will sleep and try again " + e);
- sleep(2000);
- continue;
- }
-
- print("part 11");
- if (max0) {
- print("max0.z:" + max0.z);
- print("max1.z:" + max1.z);
- }
-
- sleep(2000);
-
- count++;
- if (count == 100) {
- printjson(dbs[0].isMaster());
+ dbs[0].getMongo().setSlaveOk();
+ sleep(5000);
+
+ // now this should resync
+ print("\nsync1.js ********************************************************************** part 11");
+ var max0 = null;
+ count = 0;
+ do {
+ try {
+ max0 = dbs[0].bar.find().sort({ z: -1 }).limit(1).next();
+ max1 = dbs[1].bar.find().sort({ z: -1 }).limit(1).next();
+ }
+ catch (e) {
+ print("\nsync1.js part 11 exception on bar.find() will sleep and try again " + e);
+ sleep(2000);
+ continue;
+ }
+
+ print("part 11");
+ if (max0) {
+ print("max0.z:" + max0.z);
+ print("max1.z:" + max1.z);
+ }
+
+ sleep(2000);
+
+ count++;
+ if (count == 100) {
+ printjson(dbs[0].isMaster());
printjson(dbs[0].adminCommand({replSetGetStatus:1}));
- printjson(dbs[1].isMaster());
+ printjson(dbs[1].isMaster());
printjson(dbs[1].adminCommand({replSetGetStatus:1}));
- pause("FAIL part 11");
- assert(false, "replsets/\nsync1.js fails timing out");
- replTest.stopSet(signal);
- return;
- }
- //print("||||| count:" + count);
- //printjson(max0);
- } while (!max0 || max0.z != max1.z);
-
- print("\nsync1.js ********************************************************************** part 12");
- pause("\nsync1.js success");
- replTest.stopSet(signal);
+ pause("FAIL part 11");
+ assert(false, "replsets/\nsync1.js fails timing out");
+ replTest.stopSet(signal);
+ return;
+ }
+ //print("||||| count:" + count);
+ //printjson(max0);
+ } while (!max0 || max0.z != max1.z);
+
+ print("\nsync1.js ********************************************************************** part 12");
+ pause("\nsync1.js success");
+ replTest.stopSet(signal);
}
if( 1 || debugging ) {
diff --git a/jstests/replsets/sync2.js b/jstests/replsets/sync2.js
new file mode 100644
index 0000000..9f6c205
--- /dev/null
+++ b/jstests/replsets/sync2.js
@@ -0,0 +1,48 @@
+
+var replTest = new ReplSetTest({ name: 'testSet', nodes: 5 });
+var nodes = replTest.startSet({ oplogSize: "2" });
+replTest.initiate();
+
+var master = replTest.getMaster();
+var config = master.getDB("local").system.replset.findOne();
+config.version++;
+config.members[0].priority = 2;
+
+try {
+ master.getDB("admin").runCommand({replSetReconfig : config});
+}
+catch(e) {
+ print(e);
+}
+
+// initial sync
+master.getDB("foo").bar.insert({x:1});
+replTest.awaitReplication();
+
+master = replTest.bridge();
+
+replTest.partition(0,4);
+replTest.partition(1,2);
+replTest.partition(2,3);
+replTest.partition(3,1);
+
+// 4 is connected to 2
+replTest.partition(4,1);
+replTest.partition(4,3);
+
+master.getDB("foo").bar.insert({x:1});
+replTest.awaitReplication();
+
+var result = master.getDB("admin").runCommand({getLastError:1,w:5,wtimeout:1000});
+assert.eq(null, result.err, tojson(result));
+
+// 4 is connected to 3
+replTest.partition(4,2);
+replTest.unPartition(4,3);
+
+master.getDB("foo").bar.insert({x:1});
+replTest.awaitReplication();
+
+result = master.getDB("admin").runCommand({getLastError:1,w:5,wtimeout:1000});
+assert.eq(null, result.err, tojson(result));
+
diff --git a/jstests/replsets/tags.js b/jstests/replsets/tags.js
new file mode 100644
index 0000000..4e73886
--- /dev/null
+++ b/jstests/replsets/tags.js
@@ -0,0 +1,154 @@
+
+var num = 5;
+var host = getHostName();
+var name = "tags";
+
+var replTest = new ReplSetTest( {name: name, nodes: num, startPort:31000} );
+var nodes = replTest.startSet();
+var port = replTest.ports;
+replTest.initiate({_id : name, members :
+ [
+ {_id:0, host : host+":"+port[0], tags : {"server" : "0", "dc" : "ny", "ny" : "1", "rack" : "ny.rk1"}},
+ {_id:1, host : host+":"+port[1], tags : {"server" : "1", "dc" : "ny", "ny" : "2", "rack" : "ny.rk1"}},
+ {_id:2, host : host+":"+port[2], tags : {"server" : "2", "dc" : "ny", "ny" : "3", "rack" : "ny.rk2", "2" : "this"}},
+ {_id:3, host : host+":"+port[3], tags : {"server" : "3", "dc" : "sf", "sf" : "1", "rack" : "sf.rk1"}},
+ {_id:4, host : host+":"+port[4], tags : {"server" : "4", "dc" : "sf", "sf" : "2", "rack" : "sf.rk2"}},
+ ],
+ settings : {
+ getLastErrorModes : {
+ "important" : {"dc" : 2, "server" : 3},
+ "a machine" : {"server" : 1}
+ }
+ }});
+
+var master = replTest.getMaster();
+
+var config = master.getDB("local").system.replset.findOne();
+
+printjson(config);
+var modes = config.settings.getLastErrorModes;
+assert.eq(typeof modes, "object");
+assert.eq(modes.important.dc, 2);
+assert.eq(modes.important.server, 3);
+assert.eq(modes["a machine"]["server"], 1);
+
+config.version++;
+config.members[1].priority = 1.5;
+config.members[2].priority = 2;
+modes.rack = {"sf" : 1};
+modes.niceRack = {"sf" : 2};
+modes["a machine"]["2"] = 1;
+modes.on2 = {"2" : 1}
+
+try {
+ master.getDB("admin").runCommand({replSetReconfig : config});
+}
+catch(e) {
+ print(e);
+}
+
+replTest.awaitReplication();
+
+print("primary should now be 2");
+master = replTest.getMaster();
+config = master.getDB("local").system.replset.findOne();
+printjson(config);
+
+modes = config.settings.getLastErrorModes;
+assert.eq(typeof modes, "object");
+assert.eq(modes.important.dc, 2);
+assert.eq(modes.important.server, 3);
+assert.eq(modes["a machine"]["server"], 1);
+assert.eq(modes.rack["sf"], 1);
+assert.eq(modes.niceRack["sf"], 2);
+
+print("bridging");
+replTest.bridge();
+
+replTest.partition(0, 3);
+replTest.partition(0, 4);
+replTest.partition(1, 3);
+replTest.partition(1, 4);
+replTest.partition(2, 3);
+replTest.partition(2, 4);
+replTest.partition(3, 4);
+print("done bridging");
+
+print("test1");
+print("2 should be primary");
+master = replTest.getMaster();
+
+printjson(master.getDB("admin").runCommand({replSetGetStatus:1}));
+
+var timeout = 20000;
+
+master.getDB("foo").bar.insert({x:1});
+var result = master.getDB("foo").runCommand({getLastError:1,w:"rack",wtimeout:timeout});
+printjson(result);
+assert.eq(result.err, "timeout");
+
+replTest.unPartition(1,4);
+
+print("test2");
+master.getDB("foo").bar.insert({x:1});
+result = master.getDB("foo").runCommand({getLastError:1,w:"rack",wtimeout:timeout});
+printjson(result);
+assert.eq(result.err, null);
+
+print("test3");
+result = master.getDB("foo").runCommand({getLastError:1,w:"niceRack",wtimeout:timeout});
+printjson(result);
+assert.eq(result.err, "timeout");
+
+replTest.unPartition(3,4);
+
+print("test4");
+result = master.getDB("foo").runCommand({getLastError:1,w:"niceRack",wtimeout:timeout});
+printjson(result);
+assert.eq(result.err, null);
+
+print("non-existent w");
+result = master.getDB("foo").runCommand({getLastError:1,w:"blahblah",wtimeout:timeout});
+printjson(result);
+assert.eq(result.assertionCode, 14830);
+assert.eq(result.ok, 0);
+
+print("test on2");
+master.getDB("foo").bar.insert({x:1});
+result = master.getDB("foo").runCommand({getLastError:1,w:"on2",wtimeout:0});
+printjson(result);
+assert.eq(result.err, null);
+
+print("test two on the primary");
+master.getDB("foo").bar.insert({x:1});
+result = master.getDB("foo").runCommand({getLastError:1,w:"a machine",wtimeout:0});
+printjson(result);
+assert.eq(result.err, null);
+
+print("test5");
+master.getDB("foo").bar.insert({x:1});
+result = master.getDB("foo").runCommand({getLastError:1,w:"important",wtimeout:timeout});
+printjson(result);
+assert.eq(result.err, null);
+
+replTest.unPartition(1,3);
+
+replTest.partition(2, 0);
+replTest.partition(2, 1);
+replTest.stop(2);
+
+print("1 must become primary here because otherwise the other members will take too long timing out their old sync threads");
+master = replTest.getMaster();
+
+print("test6");
+master.getDB("foo").bar.insert({x:1});
+result = master.getDB("foo").runCommand({getLastError:1,w:"niceRack",wtimeout:timeout});
+printjson(result);
+assert.eq(result.err, null);
+
+print("test on2");
+master.getDB("foo").bar.insert({x:1});
+result = master.getDB("foo").runCommand({getLastError:1,w:"on2",wtimeout:timeout});
+printjson(result);
+assert.eq(result.err, "timeout");
+
diff --git a/jstests/replsets/tags2.js b/jstests/replsets/tags2.js
new file mode 100644
index 0000000..16dfcdf
--- /dev/null
+++ b/jstests/replsets/tags2.js
@@ -0,0 +1,44 @@
+// Change a getLastErrorMode from 2 to 3 servers
+
+var host = getHostName();
+var replTest = new ReplSetTest( {name: "rstag", nodes: 3, startPort: 31000} );
+var nodes = replTest.startSet();
+var ports = replTest.ports;
+var conf = {_id : "rstag", version: 1, members : [
+ {_id : 0, host : host+":"+ports[0], tags : {"backup" : "A"}},
+ {_id : 1, host : host+":"+ports[1], tags : {"backup" : "B"}},
+ {_id : 2, host : host+":"+ports[2], tags : {"backup" : "C"}} ],
+ settings : {getLastErrorModes : {
+ backedUp : {backup : 2} }} };
+replTest.initiate( conf );
+replTest.awaitReplication();
+
+master = replTest.getMaster();
+var db = master.getDB("test");
+db.foo.insert( {x:1} );
+var result = db.runCommand( {getLastError:1, w:"backedUp", wtimeout:20000} );
+assert.eq (result.err, null);
+
+conf.version = 2;
+conf.settings.getLastErrorModes.backedUp.backup = 3;
+master.getDB("admin").runCommand( {replSetReconfig: conf} );
+replTest.awaitReplication();
+
+master = replTest.getMaster();
+var db = master.getDB("test");
+db.foo.insert( {x:2} );
+var result = db.runCommand( {getLastError:1, w:"backedUp", wtimeout:20000} );
+assert.eq (result.err, null);
+
+conf.version = 3;
+conf.members[0].priority = 3;
+conf.members[2].priority = 0;
+master.getDB("admin").runCommand( {replSetReconfig: conf} );
+
+master = replTest.getMaster();
+var db = master.getDB("test");
+db.foo.insert( {x:3} );
+var result = db.runCommand( {getLastError:1, w:"backedUp", wtimeout:20000} );
+assert.eq (result.err, null);
+
+replTest.stopSet();
diff --git a/jstests/replsets/toostale.js b/jstests/replsets/toostale.js
index 0b8da0d..08b1a9c 100644
--- a/jstests/replsets/toostale.js
+++ b/jstests/replsets/toostale.js
@@ -32,7 +32,7 @@ var wait = function(f) {
}
var reconnect = function(a) {
- wait(function() {
+ wait(function() {
try {
a.bar.stats();
return true;
@@ -46,9 +46,14 @@ var reconnect = function(a) {
var name = "toostale"
var replTest = new ReplSetTest( {name: name, nodes: 3});
+var host = getHostName();
var nodes = replTest.startSet();
-replTest.initiate();
+replTest.initiate({_id : name, members : [
+ {_id : 0, host : host+":"+replTest.ports[0]},
+ {_id : 1, host : host+":"+replTest.ports[1], arbiterOnly : true},
+ {_id : 2, host : host+":"+replTest.ports[2]}
+]});
var master = replTest.getMaster();
var mdb = master.getDB("foo");
@@ -60,7 +65,7 @@ mdb.foo.save({a: 1000});
print("2: initial sync");
replTest.awaitReplication();
-print("3: blind s2");
+print("3: stop s2");
replTest.stop(2);
print("waiting until the master knows the slave is blind");
assert.soon(function() { return master.getDB("admin").runCommand({replSetGetStatus:1}).members[2].health == 0 });
@@ -82,7 +87,7 @@ while (count != prevCount) {
}
-print("5: unblind s2");
+print("5: restart s2");
replTest.restart(2);
print("waiting until the master knows the slave is not blind");
assert.soon(function() { return master.getDB("admin").runCommand({replSetGetStatus:1}).members[2].health != 0 });
@@ -106,16 +111,17 @@ replTest.restart(2);
print("8: check s2.state == 3");
-status = master.getDB("admin").runCommand({replSetGetStatus:1});
-while (status.state == 0) {
- print("state is 0: ");
- printjson(status);
- sleep(1000);
- status = master.getDB("admin").runCommand({replSetGetStatus:1});
-}
+assert.soon(function() {
+ var status = master.getDB("admin").runCommand({replSetGetStatus:1});
+ printjson(status);
+ return status.members && status.members[2].state == 3;
+});
-printjson(status);
-assert.eq(status.members[2].state, 3, 'recovering');
+print("make sure s2 doesn't become primary");
+replTest.stop(0);
+sleep(20000);
+printjson(replTest.nodes[2].getDB("admin").runCommand({isMaster : 1}));
+printjson(replTest.nodes[2].getDB("admin").runCommand({replSetGetStatus : 1}));
-replTest.stopSet(15);
+//replTest.stopSet(15);
diff --git a/jstests/replsets/twosets.js b/jstests/replsets/twosets.js
deleted file mode 100644
index aae1113..0000000
--- a/jstests/replsets/twosets.js
+++ /dev/null
@@ -1,35 +0,0 @@
-// add a node from a different set to the current set
-// I don't know what should happen here.
-
-doTest = function( signal ) {
-
- var orig = new ReplSetTest( {name: 'testSet', nodes: 3} );
- orig.startSet();
- orig.initiate();
- var master = orig.getMaster();
-
- var interloper = new ReplSetTest( {name: 'testSet', nodes: 3, startPort : 31003} );
- interloper.startSet();
- interloper.initiate();
-
- var conf = master.getDB("local").system.replset.findOne();
-
- var nodes = interloper.nodeList();
- var host = nodes[0];
- var id = conf.members.length;
- conf.members.push({_id : id, host : host});
- conf.version++;
-
- try {
- var result = master.getDB("admin").runCommand({replSetReconfig : conf});
- }
- catch(e) {
- print(e);
- }
-
- // now... stuff should blow up?
-
- sleep(10);
-}
-
-doTest(15); \ No newline at end of file
diff --git a/jstests/set7.js b/jstests/set7.js
index b46fe9e..c6d311b 100644
--- a/jstests/set7.js
+++ b/jstests/set7.js
@@ -38,3 +38,19 @@ t.save( {a:[]} );
t.update( {}, {$set:{"a.f":1}} );
assert( db.getLastError() );
assert.eq( [], t.findOne().a );
+
+// SERVER-3750
+t.drop();
+t.save( {a:[]} );
+t.update( {}, {$set:{"a.1500000":1}} ); // current limit
+assert( db.getLastError() == null );
+
+t.drop();
+t.save( {a:[]} );
+t.update( {}, {$set:{"a.1500001":1}} ); // 1 over limit
+assert.eq(15891 , db.getLastErrorObj().code );
+
+t.drop();
+t.save( {a:[]} );
+t.update( {}, {$set:{"a.1000000000":1}} ); // way over limit
+assert.eq(15891 , db.getLastErrorObj().code );
diff --git a/jstests/sharding/addshard1.js b/jstests/sharding/addshard1.js
index f28feed..0ca6a83 100644
--- a/jstests/sharding/addshard1.js
+++ b/jstests/sharding/addshard1.js
@@ -44,7 +44,7 @@ assert.eq( s.normalize( s.config.databases.findOne( { _id : "testDB" } ).primary
assert.eq( numObjs , sdb1.foo.count() , "wrong count after moving datbase that existed before addshard" );
// make sure we can shard the original collections
-sdb1.foo.ensureIndex( { a : 1 } ) // can't shard populated collection without an index
+sdb1.foo.ensureIndex( { a : 1 }, { unique : true } ) // can't shard populated collection without an index
s.adminCommand( { enablesharding : "testDB" } );
s.adminCommand( { shardcollection : "testDB.foo" , key: { a : 1 } } );
s.adminCommand( { split : "testDB.foo", middle: { a : Math.floor(numObjs/2) } } );
diff --git a/jstests/sharding/addshard4.js b/jstests/sharding/addshard4.js
index be4a8b3..4a44b55 100644
--- a/jstests/sharding/addshard4.js
+++ b/jstests/sharding/addshard4.js
@@ -2,15 +2,18 @@
s = new ShardingTest( "addshard4", 2 , 0 , 1 , {useHostname : true});
-r = new ReplSetTest({name : "addshard4", nodes : 3, startPort : 34000});
+r = new ReplSetTest({name : "addshard4", nodes : 3, startPort : 31100});
r.startSet();
var config = r.getReplSetConfig();
config.members[2].priority = 0;
r.initiate(config);
+//Wait for replica set to be fully initialized - could take some time
+//to pre-allocate files on slow systems
+r.awaitReplication();
-var master = r.getMaster().master;
+var master = r.getMaster();
var members = config.members.map(function(elem) { return elem.host; });
var shardName = "addshard4/"+members.join(",");
@@ -20,5 +23,24 @@ print("adding shard "+shardName);
var result = s.adminCommand({"addshard" : shardName});
printjson(result);
+assert.eq(result, true);
+r = new ReplSetTest({name : "addshard42", nodes : 3, startPort : 31200});
+r.startSet();
+
+config = r.getReplSetConfig();
+config.members[2].arbiterOnly = true;
+
+r.initiate(config);
+// Wait for replica set to be fully initialized - could take some time
+// to pre-allocate files on slow systems
+r.awaitReplication();
+master = r.getMaster();
+
+print("adding shard addshard42");
+
+result = s.adminCommand({"addshard" : "addshard42/"+config.members[2].host});
+
+printjson(result);
+assert.eq(result, true);
diff --git a/jstests/sharding/array_shard_key.js b/jstests/sharding/array_shard_key.js
new file mode 100644
index 0000000..1ea61e8
--- /dev/null
+++ b/jstests/sharding/array_shard_key.js
@@ -0,0 +1,127 @@
+// Ensure you can't shard on an array key
+
+var st = new ShardingTest({ name : jsTestName(), shards : 3 })
+
+var mongos = st.s0
+
+var coll = mongos.getCollection( jsTestName() + ".foo" )
+
+st.shardColl( coll, { _id : 1, i : 1 }, { _id : ObjectId(), i : 1 } )
+
+printjson( mongos.getDB("config").chunks.find().toArray() )
+
+st.printShardingStatus()
+
+print( "1: insert some invalid data" )
+
+var value = null
+
+var checkError = function( shouldError ){
+ var error = coll.getDB().getLastError()
+
+ if( error != null ) printjson( error )
+
+ if( error == null && ! shouldError ) return
+ if( error != null && shouldError ) return
+
+ if( error == null ) print( "No error detected!" )
+ else print( "Unexpected error!" )
+
+ assert( false )
+}
+
+// Insert an object with invalid array key
+coll.insert({ i : [ 1, 2 ] })
+checkError( true )
+
+// Insert an object with valid array key
+coll.insert({ i : 1 })
+checkError( false )
+
+// Update the value with valid other field
+value = coll.findOne({ i : 1 })
+coll.update( value, { $set : { j : 2 } } )
+checkError( false )
+
+// Update the value with invalid other fields
+value = coll.findOne({ i : 1 })
+coll.update( value, Object.merge( value, { i : [ 3 ] } ) )
+checkError( true )
+
+// Multi-update the value with invalid other fields
+value = coll.findOne({ i : 1 })
+coll.update( value, Object.merge( value, { i : [ 3, 4 ] } ), false, true)
+checkError( true )
+
+// Single update the value with valid other fields
+value = coll.findOne({ i : 1 })
+coll.update( Object.merge( value, { i : [ 3, 4 ] } ), value )
+checkError( true )
+
+// Multi-update the value with other fields (won't work, but no error)
+value = coll.findOne({ i : 1 })
+coll.update( Object.merge( value, { i : [ 1, 1 ] } ), { $set : { k : 4 } }, false, true)
+checkError( false )
+
+// Query the value with other fields (won't work, but no error)
+value = coll.findOne({ i : 1 })
+coll.find( Object.merge( value, { i : [ 1, 1 ] } ) ).toArray()
+checkError( false )
+
+// Can't remove using multikey, but shouldn't error
+value = coll.findOne({ i : 1 })
+coll.remove( Object.extend( value, { i : [ 1, 2, 3, 4 ] } ) )
+checkError( false )
+
+// Can't remove using multikey, but shouldn't error
+value = coll.findOne({ i : 1 })
+coll.remove( Object.extend( value, { i : [ 1, 2, 3, 4, 5 ] } ) )
+error = coll.getDB().getLastError()
+assert.eq( error, null )
+assert.eq( coll.find().itcount(), 1 )
+
+value = coll.findOne({ i : 1 })
+coll.remove( Object.extend( value, { i : 1 } ) )
+error = coll.getDB().getLastError()
+assert.eq( error, null )
+assert.eq( coll.find().itcount(), 0 )
+
+printjson( "Sharding-then-inserting-multikey tested, now trying inserting-then-sharding-multikey" )
+
+// Insert a bunch of data then shard over key which is an array
+var coll = mongos.getCollection( "" + coll + "2" )
+for( var i = 0; i < 10; i++ ){
+ // TODO : does not check weird cases like [ i, i ]
+ coll.insert({ i : [ i, i + 1 ] })
+ checkError( false )
+}
+
+coll.ensureIndex({ _id : 1, i : 1 })
+
+try {
+ st.shardColl( coll, { _id : 1, i : 1 }, { _id : ObjectId(), i : 1 } )
+}
+catch( e ){
+ print( "Correctly threw error on sharding with multikey index." )
+}
+
+st.printShardingStatus()
+
+// Insert a bunch of data then shard over key which is not an array
+var coll = mongos.getCollection( "" + coll + "3" )
+for( var i = 0; i < 10; i++ ){
+ // TODO : does not check weird cases like [ i, i ]
+ coll.insert({ i : i })
+ checkError( false )
+}
+
+coll.ensureIndex({ _id : 1, i : 1 })
+
+st.shardColl( coll, { _id : 1, i : 1 }, { _id : ObjectId(), i : 1 } )
+
+st.printShardingStatus()
+
+
+
+// Finish
+st.stop()
diff --git a/jstests/sharding/auth.js b/jstests/sharding/auth.js
new file mode 100644
index 0000000..8d8d7d7
--- /dev/null
+++ b/jstests/sharding/auth.js
@@ -0,0 +1,177 @@
+
+adminUser = {
+ db : "admin",
+ username : "foo",
+ password : "bar"
+};
+
+testUser = {
+ db : "test",
+ username : "bar",
+ password : "baz"
+};
+
+function login(userObj) {
+ var n = s.getDB(userObj.db).runCommand({getnonce: 1});
+ var a = s.getDB(userObj.db).runCommand({authenticate: 1, user: userObj.username, nonce: n.nonce, key: s.getDB("admin").__pwHash(n.nonce, userObj.username, userObj.password)});
+ printjson(a);
+}
+
+function logout(userObj) {
+ s.getDB(userObj.db).runCommand({logout:1});
+}
+
+function getShardName(rsTest) {
+ var master = rsTest.getMaster();
+ var config = master.getDB("local").system.replset.findOne();
+ var members = config.members.map(function(elem) { return elem.host; });
+ return config._id+"/"+members.join(",");
+}
+
+var s = new ShardingTest( "auth1", 0 , 0 , 1 , {rs: true, extraOptions : {"keyFile" : "jstests/libs/key1"}, noChunkSize : true});
+
+print("logging in first, if there was an unclean shutdown the user might already exist");
+login(adminUser);
+
+var user = s.getDB("admin").system.users.findOne();
+if (user) {
+ print("user already exists");
+ printjson(user);
+}
+else {
+ print("adding user");
+ s.getDB(adminUser.db).addUser(adminUser.username, adminUser.password);
+}
+
+login(adminUser);
+s.getDB( "config" ).settings.update( { _id : "chunksize" }, {$set : {value : 1 }}, true );
+printjson(s.getDB("config").runCommand({getlasterror:1}));
+printjson(s.getDB("config").settings.find().toArray());
+
+print("restart mongos");
+stopMongoProgram(31000);
+var opts = { port : 31000, v : 0, configdb : s._configDB, keyFile : "jstests/libs/key1", chunkSize : 1 };
+var conn = startMongos( opts );
+s.s = s._mongos[0] = s["s0"] = conn;
+
+login(adminUser);
+
+d1 = new ReplSetTest({name : "d1", nodes : 3, startPort : 31100});
+d1.startSet({keyFile : "jstests/libs/key2"});
+d1.initiate();
+
+print("initiated");
+var shardName = getShardName(d1);
+
+print("adding shard w/out auth "+shardName);
+logout(adminUser);
+
+var result = s.getDB("admin").runCommand({addShard : shardName});
+printjson(result);
+assert.eq(result.errmsg, "unauthorized");
+
+login(adminUser);
+
+print("adding shard w/wrong key "+shardName);
+
+var thrown = false;
+try {
+ result = s.adminCommand({addShard : shardName});
+}
+catch(e) {
+ thrown = true;
+ printjson(e);
+}
+assert(thrown);
+
+print("start rs w/correct key");
+d1.stopSet();
+d1.startSet({keyFile : "jstests/libs/key1"});
+d1.initiate();
+var master = d1.getMaster();
+
+print("adding shard w/auth "+shardName);
+
+result = s.getDB("admin").runCommand({addShard : shardName});
+assert.eq(result.ok, 1, tojson(result));
+
+s.getDB("admin").runCommand({enableSharding : "test"});
+s.getDB("admin").runCommand({shardCollection : "test.foo", key : {x : 1}});
+
+s.getDB(testUser.db).addUser(testUser.username, testUser.password);
+
+logout(adminUser);
+
+print("query try");
+var e = assert.throws(function() {
+ conn.getDB("foo").bar.findOne();
+});
+printjson(e);
+
+print("cmd try");
+e = assert.throws(function() {
+ conn.getDB("foo").runCommand({listdbs:1});
+});
+printjson(e);
+
+print("insert try 1");
+s.getDB("test").foo.insert({x:1});
+result = s.getDB("test").runCommand({getLastError : 1});
+assert.eq(result.err, "unauthorized");
+
+logout(adminUser);
+
+login(testUser);
+
+print("insert try 2");
+s.getDB("test").foo.insert({x:1});
+result = s.getDB("test").runCommand({getLastError : 1});
+assert.eq(result.err, null);
+
+logout(testUser);
+
+d2 = new ReplSetTest({name : "d2", nodes : 3, startPort : 31200});
+d2.startSet({keyFile : "jstests/libs/key1"});
+d2.initiate();
+
+shardName = getShardName(d2);
+
+print("adding shard "+shardName);
+login(adminUser);
+print("logged in");
+result = s.getDB("admin").runCommand({addShard : shardName})
+
+var num = 100000;
+for (i=0; i<num; i++) {
+ s.getDB("test").foo.insert({x:i, abc : "defg", date : new Date(), str : "all the talk on the market"});
+}
+
+var d1Chunks = s.getDB("config").chunks.count({shard : "d1"});
+var d2Chunks = s.getDB("config").chunks.count({shard : "d2"});
+var totalChunks = s.getDB("config").chunks.count({ns : "test.foo"});
+
+print("chunks: " + d1Chunks+" "+d2Chunks+" "+totalChunks);
+
+assert(d1Chunks > 0 && d2Chunks > 0 && d1Chunks+d2Chunks == totalChunks);
+
+assert.eq(s.getDB("test").foo.count(), num+1);
+
+s.s.setSlaveOk();
+
+var cursor = s.getDB("test").foo.find({x:{$lt : 500}});
+
+var count = 0;
+while (cursor.hasNext()) {
+ cursor.next();
+ count++;
+}
+
+assert.eq(count, 501);
+
+// check that dump doesn't get stuck with auth
+var x = runMongoProgram( "mongodump", "--host", "127.0.0.1:31000", "-d", testUser.db, "-u", testUser.username, "-p", testUser.password);
+
+print("result: "+x);
+
+
+s.stop();
diff --git a/jstests/sharding/count_slaveok.js b/jstests/sharding/count_slaveok.js
new file mode 100644
index 0000000..075ab41
--- /dev/null
+++ b/jstests/sharding/count_slaveok.js
@@ -0,0 +1,69 @@
+// Tests count and distinct using slaveOk
+
+var st = new ShardingTest( testName = "countSlaveOk",
+ numShards = 1,
+ verboseLevel = 0,
+ numMongos = 1,
+ { rs : true,
+ rs0 : { nodes : 2 }
+ })
+
+var rst = st._rs[0].test
+
+// Insert data into replica set
+var conn = new Mongo( st.s.host )
+conn.setLogLevel( 3 )
+
+var coll = conn.getCollection( "test.countSlaveOk" )
+coll.drop()
+
+for( var i = 0; i < 300; i++ ){
+ coll.insert( { i : i % 10 } )
+}
+
+var connA = conn
+var connB = new Mongo( st.s.host )
+var connC = new Mongo( st.s.host )
+
+// Make sure the writes get through, otherwise we can continue to error these one-at-a-time
+coll.getDB().getLastError()
+
+st.printShardingStatus()
+
+// Wait for client to update itself and replication to finish
+rst.awaitReplication()
+
+var primary = rst.getPrimary()
+var sec = rst.getSecondary()
+
+// Data now inserted... stop the master, since only two in set, other will still be secondary
+rst.stop( rst.getMaster(), undefined, true )
+printjson( rst.status() )
+
+// Wait for the mongos to recognize the slave
+ReplSetTest.awaitRSClientHosts( conn, sec, { ok : true, secondary : true } )
+
+// Need to check slaveOk=true first, since slaveOk=false will destroy conn in pool when
+// master is down
+conn.setSlaveOk()
+
+// Should throw exception, since not slaveOk'd
+assert.eq( 30, coll.find({ i : 0 }).count() )
+assert.eq( 10, coll.distinct("i").length )
+
+try {
+
+ conn.setSlaveOk( false )
+ coll.find({ i : 0 }).count()
+
+ print( "Should not reach here!" )
+ printjson( coll.getDB().getLastError() )
+ assert( false )
+
+}
+catch( e ){
+ print( "Non-slaveOk'd connection failed." )
+}
+
+// Finish
+st.stop()
diff --git a/jstests/sharding/drop_sharded_db.js b/jstests/sharding/drop_sharded_db.js
new file mode 100644
index 0000000..aedde8f
--- /dev/null
+++ b/jstests/sharding/drop_sharded_db.js
@@ -0,0 +1,62 @@
+// Tests the dropping of a sharded database SERVER-3471 SERVER-1726
+
+var st = new ShardingTest({ name : jsTestName() })
+
+var mongos = st.s0
+var config = mongos.getDB( "config" )
+
+var dbName = "buy"
+var dbA = mongos.getDB( dbName )
+var dbB = mongos.getDB( dbName + "_201107" )
+var dbC = mongos.getDB( dbName + "_201108" )
+
+print( "1: insert some data and colls into all dbs" )
+
+var numDocs = 3000;
+var numColls = 10;
+for( var i = 0; i < numDocs; i++ ){
+ dbA.getCollection( "data" + (i % numColls) ).insert({ _id : i })
+ dbB.getCollection( "data" + (i % numColls) ).insert({ _id : i })
+ dbC.getCollection( "data" + (i % numColls) ).insert({ _id : i })
+}
+
+print( "2: shard the colls ")
+
+for( var i = 0; i < numColls; i++ ){
+
+ var key = { _id : 1 }
+ st.shardColl( dbA.getCollection( "data" + i ), key )
+ st.shardColl( dbB.getCollection( "data" + i ), key )
+ st.shardColl( dbC.getCollection( "data" + i ), key )
+
+}
+
+print( "3: drop the non-suffixed db ")
+
+dbA.dropDatabase()
+
+
+print( "3: ensure only the non-suffixed db was dropped ")
+
+var dbs = mongos.getDBNames()
+for( var i = 0; i < dbs.length; i++ ){
+ assert.neq( dbs, "" + dbA )
+}
+
+assert.eq( 0, config.databases.find({ _id : "" + dbA }).toArray().length )
+assert.eq( 1, config.databases.find({ _id : "" + dbB }).toArray().length )
+assert.eq( 1, config.databases.find({ _id : "" + dbC }).toArray().length )
+
+assert.eq( numColls, config.collections.find({ _id : RegExp( "^" + dbA + "\\..*" ), dropped : true }).toArray().length )
+assert.eq( numColls, config.collections.find({ _id : RegExp( "^" + dbB + "\\..*" ), dropped : false }).toArray().length )
+assert.eq( numColls, config.collections.find({ _id : RegExp( "^" + dbC + "\\..*" ), dropped : false }).toArray().length )
+
+for( var i = 0; i < numColls; i++ ){
+
+ assert.eq( numDocs / numColls, dbB.getCollection( "data" + (i % numColls) ).find().itcount() )
+ assert.eq( numDocs / numColls, dbC.getCollection( "data" + (i % numColls) ).find().itcount() )
+
+}
+
+// Finish
+st.stop()
diff --git a/jstests/sharding/features2.js b/jstests/sharding/features2.js
index b2070ea..67a9abe 100644
--- a/jstests/sharding/features2.js
+++ b/jstests/sharding/features2.js
@@ -97,7 +97,7 @@ doMR = function( n ){
var res = db.mr.mapReduce( m , r , "smr1_out" );
printjson( res );
- assert.eq( new NumberLong(4) , res.counts.input , "MR T0 " + n );
+ assert.eq( 4 , res.counts.input , "MR T0 " + n );
var x = db[res.result];
assert.eq( 3 , x.find().count() , "MR T1 " + n );
@@ -115,7 +115,7 @@ doMR = function( n ){
var res = db.mr.mapReduce( m , r , { out : { inline : 1 } } );
printjson( res );
- assert.eq( new NumberLong(4) , res.counts.input , "MR T6 " + n );
+ assert.eq( 4 , res.counts.input , "MR T6 " + n );
var z = {};
res.find().forEach( function(a){ z[a._id] = a.value.count; } );
@@ -173,4 +173,11 @@ catch ( e ){
assert.eq( x , y , "assert format" )
+// isMaster and query-wrapped-command
+isMaster = db.runCommand({isMaster:1});
+assert( isMaster.ismaster );
+assert.eq( 'isdbgrid', isMaster.msg );
+assert.eq( isMaster, db.runCommand({query: {isMaster:1}}) );
+assert.eq( isMaster, db.runCommand({$query: {isMaster:1}}) );
+
s.stop();
diff --git a/jstests/sharding/features3.js b/jstests/sharding/features3.js
index b28d88e..5277d22 100644
--- a/jstests/sharding/features3.js
+++ b/jstests/sharding/features3.js
@@ -17,52 +17,79 @@ for ( i=0; i<N; i++ )
db.foo.insert( { _id : i } )
db.getLastError();
x = db.foo.stats();
+assert.eq( "test.foo" , x.ns , "basic1" )
+assert( x.sharded , "basic2" )
assert.eq( N , x.count , "total count" )
assert.eq( N / 2 , x.shards.shard0000.count , "count on shard0000" )
assert.eq( N / 2 , x.shards.shard0001.count , "count on shard0001" )
+assert( x.totalIndexSize > 0 )
+assert( x.numExtents > 0 )
+db.bar.insert( { x : 1 } )
+x = db.bar.stats();
+assert.eq( 1 , x.count , "XXX1" )
+assert.eq( "test.bar" , x.ns , "XXX2" )
+assert( ! x.sharded , "XXX3: " + tojson(x) )
+
+// Fork shell and start pulling back data
start = new Date()
print( "about to fork shell: " + Date() )
-join = startParallelShell( "db.foo.find( function(){ x = ''; for ( i=0; i<10000; i++ ){ x+=i; } return true; } ).itcount()" )
+
+// TODO: Still potential problem when our sampling of current ops misses when $where is active -
+// solution is to increase sleep time
+parallelCommand = "try { while(true){" +
+ " db.foo.find( function(){ x = ''; for ( i=0; i<10000; i++ ){ x+=i; } sleep( 1000 ); return true; } ).itcount() " +
+ "}} catch(e){ print('PShell execution ended:'); printjson( e ) }"
+
+join = startParallelShell( parallelCommand )
print( "after forking shell: " + Date() )
+// Get all current $where operations
function getMine( printInprog ){
+
var inprog = db.currentOp().inprog;
+
if ( printInprog )
printjson( inprog )
+
+ // Find all the where queries
var mine = []
for ( var x=0; x<inprog.length; x++ ){
if ( inprog[x].query && inprog[x].query.$where ){
mine.push( inprog[x] )
}
}
+
return mine;
}
-state = 0; // 0 = not found, 1 = killed,
-killTime = null;
+var state = 0; // 0 = not found, 1 = killed,
+var killTime = null;
+var i = 0;
-for ( i=0; i<( 100* 1000 ); i++ ){
+assert.soon( function(){
+
+ // Get all the current operations
mine = getMine( state == 0 && i > 20 );
- if ( state == 0 ){
- if ( mine.length == 0 ){
- sleep(1);
- continue;
- }
+ i++;
+
+ // Wait for the queries to start
+ if ( state == 0 && mine.length > 0 ){
+ // Queries started
state = 1;
+ // Kill all $where
mine.forEach( function(z){ printjson( db.getSisterDB( "admin" ).killOp( z.opid ) ); } )
killTime = new Date()
}
- else if ( state == 1 ){
- if ( mine.length == 0 ){
- state = 2;
- break;
- }
- sleep(1)
- continue;
+ // Wait for killed queries to end
+ else if ( state == 1 && mine.length == 0 ){
+ // Queries ended
+ state = 2;
+ return true;
}
-}
+
+}, "Couldn't kill the $where operations.", 2 * 60 * 1000 )
print( "after loop: " + Date() );
assert( killTime , "timed out waiting too kill last mine:" + tojson(mine) )
diff --git a/jstests/sharding/group_slaveok.js b/jstests/sharding/group_slaveok.js
new file mode 100644
index 0000000..3b7cec4
--- /dev/null
+++ b/jstests/sharding/group_slaveok.js
@@ -0,0 +1,68 @@
+// Tests group using slaveOk
+
+var st = new ShardingTest( testName = "groupSlaveOk",
+ numShards = 1,
+ verboseLevel = 0,
+ numMongos = 1,
+ { rs : true,
+ rs0 : { nodes : 2 }
+ })
+
+var rst = st._rs[0].test
+
+// Insert data into replica set
+var conn = new Mongo( st.s.host )
+conn.setLogLevel( 3 )
+
+var coll = conn.getCollection( "test.groupSlaveOk" )
+coll.drop()
+
+for( var i = 0; i < 300; i++ ){
+ coll.insert( { i : i % 10 } )
+}
+
+// Make sure the writes get through, otherwise we can continue to error these one-at-a-time
+coll.getDB().getLastError()
+
+st.printShardingStatus()
+
+// Wait for client to update itself and replication to finish
+rst.awaitReplication()
+
+var primary = rst.getPrimary()
+var sec = rst.getSecondary()
+
+// Data now inserted... stop the master, since only two in set, other will still be secondary
+rst.stop( rst.getMaster(), undefined, true )
+printjson( rst.status() )
+
+// Wait for the mongos to recognize the slave
+ReplSetTest.awaitRSClientHosts( conn, sec, { ok : true, secondary : true } )
+
+// Need to check slaveOk=true first, since slaveOk=false will destroy conn in pool when
+// master is down
+conn.setSlaveOk()
+
+// Should not throw exception, since slaveOk'd
+assert.eq( 10, coll.group({ key : { i : true } ,
+ reduce : function( obj, ctx ){ ctx.count += 1 } ,
+ initial : { count : 0 } }).length )
+
+try {
+
+ conn.setSlaveOk( false )
+ coll.group({ key : { i : true } ,
+ reduce : function( obj, ctx ){ ctx.count += 1 } ,
+ initial : { count : 0 } })
+
+ print( "Should not reach here!" )
+ printjson( coll.getDB().getLastError() )
+ assert( false )
+
+}
+catch( e ){
+ print( "Non-slaveOk'd connection failed." )
+}
+
+// Finish
+st.stop()
diff --git a/jstests/sharding/index1.js b/jstests/sharding/index1.js
new file mode 100644
index 0000000..6f99449
--- /dev/null
+++ b/jstests/sharding/index1.js
@@ -0,0 +1,174 @@
+// from server 2326 - make sure that sharding only works with unique indices
+
+s = new ShardingTest( "shard_index", 2, 50, 1 )
+
+// Regenerate fully because of SERVER-2782
+for ( var i = 0; i < 10; i++ ) {
+
+ var coll = s.admin._mongo.getDB( "test" ).getCollection( "foo" + i )
+ coll.drop()
+
+ for ( var j = 0; j < 300; j++ ) {
+ coll.insert( { num : j, x : 1 } )
+ }
+
+ if(i == 0) s.adminCommand( { enablesharding : "" + coll._db } );
+
+ print("\n\n\n\n\nTest # " + i)
+
+ if ( i == 0 ) {
+
+ // Unique index exists, but not the right one.
+ coll.ensureIndex( { num : 1 }, { unique : true } )
+ coll.ensureIndex( { x : 1 } )
+
+ passed = false
+ try {
+ s.adminCommand( { shardcollection : "" + coll, key : { x : 1 } } )
+ passed = true
+ } catch (e) {
+ print( e )
+ }
+ assert( !passed, "Should not shard collection when another unique index exists!")
+
+ }
+ if ( i == 1 ) {
+
+ // Unique index exists as prefix, also index exists
+ coll.ensureIndex( { x : 1 } )
+ coll.ensureIndex( { x : 1, num : 1 }, { unique : true } )
+
+ try{
+ s.adminCommand({ shardcollection : "" + coll, key : { x : 1 } })
+ }
+ catch(e){
+ print(e)
+ assert( false, "Should be able to shard non-unique index without unique option.")
+ }
+
+ }
+ if ( i == 2 ) {
+ if (false) { // SERVER-3718
+ // Non-unique index exists as prefix, also index exists. No unique index.
+ coll.ensureIndex( { x : 1 } )
+ coll.ensureIndex( { x : 1, num : 1 } )
+
+ passed = false;
+ try{
+ s.adminCommand({ shardcollection : "" + coll, key : { x : 1 } })
+ passed = true;
+
+ }
+ catch( e ){
+ print(e)
+ }
+ assert( !passed, "Should not shard collection with no unique index.")
+ }
+ }
+ if ( i == 3 ) {
+
+ // Unique index exists as prefix, also unique index exists
+ coll.ensureIndex( { num : 1 }, { unique : true })
+ coll.ensureIndex( { num : 1 , x : 1 }, { unique : true } )
+
+ try{
+ s.adminCommand({ shardcollection : "" + coll, key : { num : 1 }, unique : true })
+ }
+ catch( e ){
+ print(e)
+ assert( false, "Should be able to shard collection with unique prefix index.")
+ }
+
+ }
+ if ( i == 4 ) {
+
+ // Unique index exists as id, also unique prefix index exists
+ coll.ensureIndex( { _id : 1, num : 1 }, { unique : true } )
+
+ try{
+ s.adminCommand({ shardcollection : "" + coll, key : { _id : 1 }, unique : true })
+ }
+ catch( e ){
+ print(e)
+ assert( false, "Should be able to shard collection with unique id index.")
+ }
+
+ }
+ if ( i == 5 ) {
+
+ // Unique index exists as id, also unique prefix index exists
+ coll.ensureIndex( { _id : 1, num : 1 }, { unique : true } )
+
+ try{
+ s.adminCommand({ shardcollection : "" + coll, key : { _id : 1, num : 1 }, unique : true })
+ }
+ catch( e ){
+ print(e)
+ assert( false, "Should be able to shard collection with unique combination id index.")
+ }
+
+ }
+ if ( i == 6 ) {
+
+ coll.remove()
+
+ // Unique index does not exist, also unique prefix index exists
+ coll.ensureIndex( { num : 1, _id : 1 }, { unique : true } )
+
+ try{
+ s.adminCommand({ shardcollection : "" + coll, key : { num : 1 }, unique : true })
+ }
+ catch( e ){
+ print(e)
+ assert( false, "Should be able to shard collection with no unique index but with a unique prefix index.")
+ }
+
+ }
+ if ( i == 7 ) {
+ coll.remove()
+
+ // No index exists
+
+ try{
+ s.adminCommand({ shardcollection : "" + coll, key : { num : 1 } })
+ }
+ catch( e ){
+ print(e)
+ assert( !passed, "Should be able to shard collection with no index on shard key.")
+ }
+ }
+ if ( i == 8 ) {
+ if (false) { // SERVER-3718
+ coll.remove()
+
+ // No index exists
+
+ passed = false
+ try{
+ s.adminCommand({ shardcollection : "" + coll, key : { num : 1 }, unique : true })
+ passed = true
+ }
+ catch( e ){
+ print(e)
+ }
+ assert( !passed, "Should not shard collection with unique flag but with no unique index on shard key.")
+ }
+ }
+ if ( i == 9 ) {
+
+ // Unique index exists on a different field as well
+ coll.ensureIndex( { num : 1 }, { unique : true } )
+ coll.ensureIndex( { x : 1 }, { unique : true} )
+
+ passed = false
+ try {
+ s.adminCommand( { shardcollection : "" + coll, key : { x : 1 } } )
+ passed = true
+ } catch (e) {
+ print( e )
+ }
+ assert( !passed, "Should not shard collection when another unique index exists!" )
+ }
+}
+
+s.stop();
diff --git a/jstests/sharding/migrateBig.js b/jstests/sharding/migrateBig.js
index f6ba18a..917f152 100644
--- a/jstests/sharding/migrateBig.js
+++ b/jstests/sharding/migrateBig.js
@@ -40,6 +40,6 @@ for ( i=0; i<20; i+= 2 )
db.printShardingStatus()
-assert.soon( function(){ var x = s.chunkDiff( "foo" , "test" ); print( "chunk diff: " + x ); return x < 2; } , "no balance happened" , 120 * 1000 , 2000 )
+assert.soon( function(){ var x = s.chunkDiff( "foo" , "test" ); print( "chunk diff: " + x ); return x < 2; } , "no balance happened" , 8 * 60 * 1000 , 2000 )
s.stop()
diff --git a/jstests/sharding/migrateMemory.js b/jstests/sharding/migrateMemory.js
new file mode 100644
index 0000000..d321220
--- /dev/null
+++ b/jstests/sharding/migrateMemory.js
@@ -0,0 +1,54 @@
+
+s = new ShardingTest( "migrateMemory" , 2 , 1 , 1 , { chunksize : 1 });
+
+s.config.settings.update( { _id: "balancer" }, { $set : { stopped: true } } , true );
+
+s.adminCommand( { enablesharding : "test" } );
+s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
+
+db = s.getDB( "test" )
+t = db.foo
+
+str = ""
+while ( str.length < 10000 ){
+ str += "asdasdsdasdasdasdas";
+}
+
+data = 0;
+num = 0;
+while ( data < ( 1024 * 1024 * 10 ) ){
+ t.insert( { _id : num++ , s : str } )
+ data += str.length
+}
+
+db.getLastError()
+
+stats = s.chunkCounts( "foo" )
+from = ""
+to = ""
+for ( x in stats ){
+ if ( stats[x] == 0 )
+ to = x
+ else
+ from = x
+}
+
+s.config.chunks.find().sort( { min : 1 } ).forEach( printjsononeline )
+
+print( "from: " + from + " to: " + to )
+printjson( stats )
+
+ss = []
+
+for ( var f = 0; f<num; f += ( 2 * num / t.stats().nchunks ) ){
+ ss.push( s.getServer( "test" ).getDB( "admin" ).serverStatus() )
+ print( f )
+ s.adminCommand( { movechunk : "test.foo" , find : { _id : f } , to : to } )
+}
+
+for ( i=0; i<ss.length; i++ )
+ printjson( ss[i].mem );
+
+
+s.stop()
+
diff --git a/jstests/sharding/multi_mongos1.js b/jstests/sharding/multi_mongos1.js
index cf9ebde..fc7eaf1 100644
--- a/jstests/sharding/multi_mongos1.js
+++ b/jstests/sharding/multi_mongos1.js
@@ -67,4 +67,5 @@ assert.eq( N , viaS2.find().toArray().length , "other B" );
printjson( primary._db._adminCommand( "shardingState" ) );
-s1.stop(); \ No newline at end of file
+
+s1.stop();
diff --git a/jstests/sharding/multi_mongos2.js b/jstests/sharding/multi_mongos2.js
new file mode 100644
index 0000000..ec95dc0
--- /dev/null
+++ b/jstests/sharding/multi_mongos2.js
@@ -0,0 +1,61 @@
+// multi_mongos2.js
+// This tests sharding an existing collection that both shards are aware of (SERVER-2828)
+
+
+// setup sharding with two mongos, s1 and s2
+s1 = new ShardingTest( "multi_mongos1" , 2 , 1 , 2 );
+s2 = s1._mongos[1];
+
+s1.adminCommand( { enablesharding : "test" } );
+s1.adminCommand( { shardcollection : "test.foo" , key : { num : 1 } } );
+
+s1.config.databases.find().forEach( printjson )
+
+// test queries
+
+s1.getDB('test').existing.insert({_id:1})
+assert.eq(1, s1.getDB('test').existing.count({_id:1}));
+assert.eq(1, s2.getDB('test').existing.count({_id:1}));
+
+s2.adminCommand( { shardcollection : "test.existing" , key : { _id : 1 } } );
+s2.adminCommand( { split : "test.existing" , find : { _id : 5 } } )
+
+res = s2.getDB( "admin" ).runCommand( { moveChunk: "test.existing" , find : { _id : 1 } , to : s1.getOther( s1.getServer( "test" ) ).name } );
+
+assert.eq(1 , res.ok, tojson(res));
+
+printjson( s2.adminCommand( {"getShardVersion" : "test.existing" } ) )
+printjson( new Mongo(s1.getServer( "test" ).name).getDB( "admin" ).adminCommand( {"getShardVersion" : "test.existing" } ) )
+
+assert.eq(1, s1.getDB('test').existing.count({_id:1})); // SERVER-2828
+assert.eq(1, s2.getDB('test').existing.count({_id:1}));
+
+// test stats
+
+s1.getDB('test').existing2.insert({_id:1})
+assert.eq(1, s1.getDB('test').existing2.count({_id:1}));
+assert.eq(1, s2.getDB('test').existing2.count({_id:1}));
+
+s2.adminCommand( { shardcollection : "test.existing2" , key : { _id : 1 } } );
+s2.adminCommand( { split : "test.existing2" , find : { _id : 5 } } )
+
+var res = s1.getDB('test').existing2.stats()
+printjson( res )
+assert.eq(true, res.sharded); //SERVER-2828
+assert.eq(true, s2.getDB('test').existing2.stats().sharded);
+
+// test admin commands
+
+s1.getDB('test').existing3.insert({_id:1})
+assert.eq(1, s1.getDB('test').existing3.count({_id:1}));
+assert.eq(1, s2.getDB('test').existing3.count({_id:1}));
+
+s2.adminCommand( { shardcollection : "test.existing3" , key : { _id : 1 } } );
+s2.adminCommand( { split : "test.existing3" , find : { _id : 5 } } )
+
+res = s1.getDB( "admin" ).runCommand( { moveChunk: "test.existing3" , find : { _id : 1 } , to : s1.getOther( s1.getServer( "test" ) ).name } );
+assert.eq(1 , res.ok, tojson(res));
+
+
+
+s1.stop();
diff --git a/jstests/sharding/parallel.js b/jstests/sharding/parallel.js
new file mode 100644
index 0000000..d35459c
--- /dev/null
+++ b/jstests/sharding/parallel.js
@@ -0,0 +1,38 @@
+numShards = 3
+s = new ShardingTest( "parallel" , numShards , 2 , 2 , { sync : true } );
+
+s.adminCommand( { enablesharding : "test" } );
+s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
+
+db = s.getDB( "test" );
+
+N = 10000;
+
+for ( i=0; i<N; i+=(N/12) ) {
+ s.adminCommand( { split : "test.foo" , middle : { _id : i } } )
+ sh.moveChunk( "test.foo", { _id : i } , "shard000" + Math.floor( Math.random() * numShards ) )
+}
+
+
+for ( i=0; i<N; i++ )
+ db.foo.insert( { _id : i } )
+db.getLastError();
+
+
+doCommand = function( dbname , cmd ) {
+ x = benchRun( { ops : [ { op : "findOne" , ns : dbname + ".$cmd" , query : cmd } ] ,
+ host : db.getMongo().host , parallel : 2 , seconds : 2 } )
+ printjson(x)
+ x = benchRun( { ops : [ { op : "findOne" , ns : dbname + ".$cmd" , query : cmd } ] ,
+ host : s._mongos[1].host , parallel : 2 , seconds : 2 } )
+ printjson(x)
+}
+
+doCommand( "test" , { dbstats : 1 } )
+doCommand( "config" , { dbstats : 1 } )
+
+x = s.getDB( "config" ).stats()
+assert( x.ok , tojson(x) )
+printjson(x)
+
+s.stop()
diff --git a/jstests/sharding/shard3.js b/jstests/sharding/shard3.js
index 7132563..e27316e 100644
--- a/jstests/sharding/shard3.js
+++ b/jstests/sharding/shard3.js
@@ -4,9 +4,18 @@ s = new ShardingTest( "shard3" , 2 , 1 , 2 );
s2 = s._mongos[1];
+db = s.getDB( "test" )
s.adminCommand( { enablesharding : "test" } );
s.adminCommand( { shardcollection : "test.foo" , key : { num : 1 } } );
+assert( sh.getBalancerState() , "A1" )
+sh.setBalancerState( false )
+assert( ! sh.getBalancerState() , "A2" )
+sh.setBalancerState( true )
+assert( sh.getBalancerState() , "A3" )
+sh.setBalancerState( false )
+assert( ! sh.getBalancerState() , "A4" )
+
s.config.databases.find().forEach( printjson )
a = s.getDB( "test" ).foo;
@@ -53,6 +62,7 @@ function doCounts( name , total , onlyItCounts ){
var total = doCounts( "before wrong save" )
secondary.save( { num : -3 } );
+printjson( secondary.getDB().getLastError() )
doCounts( "after wrong save" , total , true )
e = a.find().explain();
assert.eq( 3 , e.n , "ex1" )
@@ -127,7 +137,7 @@ print( "*** ready to call dropDatabase" )
res = s.getDB( "test" ).dropDatabase();
assert.eq( 1 , res.ok , "dropDatabase failed : " + tojson( res ) );
// Waiting for SERVER-2253
-// assert.eq( 0 , s.config.databases.count( { _id: "test" } ) , "database 'test' was dropped but still appears in configDB" );
+assert.eq( 0 , s.config.databases.count( { _id: "test" } ) , "database 'test' was dropped but still appears in configDB" );
s.printShardingStatus();
s.printCollectionInfo( "test.foo" , "after dropDatabase call 1" );
diff --git a/jstests/sharding/shard6.js b/jstests/sharding/shard6.js
index 70c5ed7..1b58cc7 100644
--- a/jstests/sharding/shard6.js
+++ b/jstests/sharding/shard6.js
@@ -103,4 +103,7 @@ assert.eq( 16 , o.x , "x1 - did save fail? " + tojson(o) );
poolStats( "at end" )
print( summary )
+
+assert.throws( function(){ s.adminCommand( { enablesharding : "admin" } ) } )
+
s.stop();
diff --git a/jstests/sharding/shard_insert_getlasterror_w2.js b/jstests/sharding/shard_insert_getlasterror_w2.js
index 5d185a5..de3d63e 100644
--- a/jstests/sharding/shard_insert_getlasterror_w2.js
+++ b/jstests/sharding/shard_insert_getlasterror_w2.js
@@ -75,7 +75,8 @@ function go() {
return false;
}
return true;
- });
+ }, "Queries took too long to complete correctly.",
+ 2 * 60 * 1000 );
// Done
routerSpec.end()
diff --git a/jstests/sharding/shard_keycount.js b/jstests/sharding/shard_keycount.js
new file mode 100644
index 0000000..e27c054
--- /dev/null
+++ b/jstests/sharding/shard_keycount.js
@@ -0,0 +1,45 @@
+// Tests splitting a chunk twice
+
+s = new ShardingTest( "shard1" , 2, 0, 1, /* chunkSize */1);
+
+dbName = "test"
+collName = "foo"
+ns = dbName + "." + collName
+
+db = s.getDB( dbName );
+
+for(var i = 0; i < 10; i++){
+ db.foo.insert({ _id : i })
+}
+
+// Enable sharding on DB
+s.adminCommand( { enablesharding : dbName } );
+
+// Enable sharding on collection
+s.adminCommand( { shardcollection : ns, key : { _id : 1 } } );
+
+// Kill balancer
+s.config.settings.update({ _id: "balancer" }, { $set : { stopped: true } }, true )
+
+// Split into two chunks
+s.adminCommand({ split : ns, find : { _id : 3 } })
+
+coll = db.getCollection( collName )
+
+// Split chunk again
+s.adminCommand({ split : ns, find : { _id : 3 } })
+
+coll.update({ _id : 3 }, { _id : 3 })
+
+// Split chunk again
+s.adminCommand({ split : ns, find : { _id : 3 } })
+
+coll.update({ _id : 3 }, { _id : 3 })
+
+// Split chunk again
+// FAILS since the key count is based on the full index, not the chunk itself
+// i.e. Split point calc'd is 5 key offset (10 documents), but only four docs
+// in chunk with bounds _id : 0 => 5
+s.adminCommand({ split : ns, find : { _id : 3 } })
+
+s.stop();
diff --git a/jstests/sharding/sharding_with_keyfile.js b/jstests/sharding/sharding_with_keyfile.js
new file mode 100644
index 0000000..94aea57
--- /dev/null
+++ b/jstests/sharding/sharding_with_keyfile.js
@@ -0,0 +1,69 @@
+// Tests sharding with a key file
+
+var st = new ShardingTest({ name : jsTestName(),
+ shards : 2,
+ mongos : 1,
+ keyFile : keyFile = "jstests/sharding/" + jsTestName() + ".key" })
+
+// Make sure all our instances got the key
+var configs = st._configDB.split(",")
+for( var i = 0; i < configs.length; i++ ) configs[i] = new Mongo( configs[i] )
+var shards = st._connections
+var mongoses = st._mongos
+
+for( var i = 0; i < configs.length; i++ )
+ assert.eq( configs[i].getDB("admin").runCommand({ getCmdLineOpts : 1 }).parsed.keyFile, keyFile )
+
+for( var i = 0; i < shards.length; i++ )
+ assert.eq( shards[i].getDB("admin").runCommand({ getCmdLineOpts : 1 }).parsed.keyFile, keyFile )
+
+for( var i = 0; i < mongoses.length; i++ )
+ assert.eq( mongoses[i].getDB("admin").runCommand({ getCmdLineOpts : 1 }).parsed.keyFile, keyFile )
+
+var mongos = st.s0
+var coll = mongos.getCollection( "test.foo" )
+
+st.shardColl( coll, { _id : 1 } )
+
+// Create an index so we can find by num later
+coll.ensureIndex({ insert : 1 })
+
+// For more logging
+// mongos.getDB("admin").runCommand({ setParameter : 1, logLevel : 3 })
+
+print( "INSERT!" )
+
+// Insert a bunch of data
+var toInsert = 2000
+for( var i = 0; i < toInsert; i++ ){
+ coll.insert({ my : "test", data : "to", insert : i })
+}
+
+assert.eq( coll.getDB().getLastError(), null )
+
+print( "UPDATE!" )
+
+// Update a bunch of data
+var toUpdate = toInsert
+for( var i = 0; i < toUpdate; i++ ){
+ var id = coll.findOne({ insert : i })._id
+ coll.update({ insert : i, _id : id }, { $inc : { counter : 1 } })
+}
+
+assert.eq( coll.getDB().getLastError(), null )
+
+print( "DELETE" )
+
+// Remove a bunch of data
+var toDelete = toInsert / 2
+for( var i = 0; i < toDelete; i++ ){
+ coll.remove({ insert : i })
+}
+
+assert.eq( coll.getDB().getLastError(), null )
+
+// Make sure the right amount of data is there
+assert.eq( coll.find().count(), toInsert / 2 )
+
+// Finish
+st.stop()
diff --git a/jstests/sharding/sharding_with_keyfile.key b/jstests/sharding/sharding_with_keyfile.key
new file mode 100755
index 0000000..fe3344b
--- /dev/null
+++ b/jstests/sharding/sharding_with_keyfile.key
@@ -0,0 +1,3 @@
+aBcDeFg
+1010101
+JJJJJJJ \ No newline at end of file
diff --git a/jstests/sharding/sync6.js b/jstests/sharding/sync6.js
new file mode 100644
index 0000000..233534b
--- /dev/null
+++ b/jstests/sharding/sync6.js
@@ -0,0 +1,81 @@
+// Test that distributed lock forcing does not result in inconsistencies, using a
+// fast timeout.
+
+// Note that this test will always have random factors, since we can't control the
+// thread scheduling.
+
+test = new SyncCCTest( "sync6", { logpath : "/dev/null" } )
+
+// Startup another process to handle our commands to the cluster, mostly so it's
+// easier to read.
+var commandConn = startMongodTest( 30000 + 4, "syncCommander", false, {})//{ logpath : "/dev/null" } )//{verbose : ""} )
+// { logpath : "/data/db/syncCommander/mongod.log" } );
+
+// Up the log level for this test
+commandConn.getDB( "admin" ).runCommand( { setParameter : 1, logLevel : 1 } )
+
+// Have lots of threads, so use larger i
+// Can't test too many, we get socket exceptions... possibly due to the
+// javascript console.
+for ( var i = 8; i < 9; i++ ) {
+
+ // Our force time is 4 seconds
+ // Slower machines can't keep up the LockPinger rate, which can lead to lock failures
+ // since our locks are only valid if the LockPinger pings faster than the force time.
+ // Actual lock timeout is 15 minutes, so a few seconds is extremely aggressive
+ var takeoverMS = 4000;
+
+ // Generate valid sleep and skew for this timeout
+ var threadSleepWithLock = takeoverMS / 2;
+ var configServerTimeSkew = [ 0, 0, 0 ]
+ for ( var h = 0; h < 3; h++ ) {
+ // Skew by 1/30th the takeover time either way, at max
+ configServerTimeSkew[h] = ( i + h ) % Math.floor( takeoverMS / 60 )
+ // Make skew pos or neg
+ configServerTimeSkew[h] *= ( ( i + h ) % 2 ) ? -1 : 1;
+ }
+
+ // Build command
+ command = { _testDistLockWithSkew : 1 }
+
+ // Basic test parameters
+ command["lockName"] = "TimeSkewFailNewTest_lock_" + i;
+ command["host"] = test.url
+ command["seed"] = i
+ command["numThreads"] = ( i % 50 ) + 1
+
+ // Critical values so we're sure of correct operation
+ command["takeoverMS"] = takeoverMS
+ command["wait"] = 4 * takeoverMS // so we must force the lock
+ command["skewHosts"] = configServerTimeSkew
+ command["threadWait"] = threadSleepWithLock
+
+ // Less critical test params
+
+ // 1/3 of threads will not release the lock
+ command["hangThreads"] = 3
+ // Amount of time to wait before trying lock again
+ command["threadSleep"] = 1;// ( ( i + 1 ) * 100 ) % (takeoverMS / 4)
+ // Amount of total clock skew possible between locking threads (processes)
+ // This can be large now.
+ command["skewRange"] = ( command["takeoverMS"] * 3 ) * 60 * 1000
+
+ // Double-check our sleep, host skew, and takeoverMS values again
+
+ // At maximum, our threads must sleep only half the lock timeout time.
+ assert( command["threadWait"] <= command["takeoverMS"] / 2 )
+ for ( var h = 0; h < command["skewHosts"].length; h++ ) {
+ // At maximum, our config server time skew needs to be less than 1/30th
+ // the total time skew (1/60th either way).
+ assert( Math.abs( command["skewHosts"][h] ) <= ( command["takeoverMS"] / 60 ) )
+ }
+
+ result = commandConn.getDB( "admin" ).runCommand( command )
+ printjson( result )
+ printjson( command )
+ assert( result.ok, "Skewed threads did not increment correctly." );
+
+}
+
+stopMongoProgram( 30004 )
+test.stop();
diff --git a/jstests/sharding/sync7.js b/jstests/sharding/sync7.js
new file mode 100644
index 0000000..a8ff094
--- /dev/null
+++ b/jstests/sharding/sync7.js
@@ -0,0 +1,63 @@
+// Test that the clock skew of the distributed lock disallows getting locks for moving and splitting.
+
+s = new ShardingTest( "moveDistLock", 3, 0, undefined, { sync : true } );
+
+s._connections[0].getDB( "admin" ).runCommand( { _skewClockCommand : 1, skew : 15000 } )
+s._connections[1].getDB( "admin" ).runCommand( { _skewClockCommand : 1, skew : -16000 } )
+
+// We need to start another mongos after skewing the clock, since the first mongos will have already
+// tested the config servers (via the balancer) before we manually skewed them
+otherMongos = startMongos( { port : 30020, v : 0, configdb : s._configDB } );
+
+// Initialize DB data
+initDB = function(name) {
+ var db = s.getDB( name );
+ var c = db.foo;
+ c.save( { a : 1 } );
+ c.save( { a : 2 } );
+ c.save( { a : 3 } );
+ assert( 3, c.count() );
+
+ return s.getServer( name );
+}
+
+from = initDB( "test1" );
+to = s.getAnother( from );
+
+s.printShardingStatus();
+
+// Make sure we can't move when our clock skew is so high
+result = otherMongos.getDB( "admin" ).runCommand( { moveprimary : "test1", to : to.name } );
+s.printShardingStatus();
+assert.eq( result.ok, 0, "Move command should not have succeeded!" )
+
+// Enable sharding on DB and collection
+result = otherMongos.getDB("admin").runCommand( { enablesharding : "test1" } );
+result = otherMongos.getDB("test1").foo.ensureIndex( { a : 1 } );
+result = otherMongos.getDB("admin").runCommand( { shardcollection : "test1.foo", key : { a : 1 } } );
+print(" Collection Sharded! ")
+
+// Make sure we can't split when our clock skew is so high
+result = otherMongos.getDB( "admin" ).runCommand( { split : "test1.foo", find : { a : 2 } } );
+assert.eq( result.ok, 0, "Split command should not have succeeded!")
+
+// Adjust clock back in bounds
+s._connections[1].getDB( "admin" ).runCommand( { _skewClockCommand : 1, skew : 0 } )
+print(" Clock adjusted back to in-bounds. ");
+
+// Make sure we can now split
+result = otherMongos.getDB( "admin" ).runCommand( { split : "test1.foo", find : { a : 2 } } );
+s.printShardingStatus();
+assert.eq( result.ok, 1, "Split command should have succeeded!")
+
+// Make sure we can now move
+result = otherMongos.getDB( "admin" ).runCommand( { moveprimary : "test1", to : to.name } );
+s.printShardingStatus();
+assert.eq( result.ok, 1, "Move command should have succeeded!" )
+
+// Make sure we can now move again (getting the lock twice)
+result = otherMongos.getDB( "admin" ).runCommand( { moveprimary : "test1", to : from.name } );
+s.printShardingStatus();
+assert.eq( result.ok, 1, "Move command should have succeeded again!" )
+
+s.stop();
diff --git a/jstests/shell1.js b/jstests/shell1.js
new file mode 100644
index 0000000..d2da3b0
--- /dev/null
+++ b/jstests/shell1.js
@@ -0,0 +1,6 @@
+
+x = 1;
+
+shellHelper( "show", "tables;" )
+shellHelper( "show", "tables" )
+shellHelper( "show", "tables ;" )
diff --git a/jstests/shellkillop.js b/jstests/shellkillop.js
index 580d4c8..d903f25 100644
--- a/jstests/shellkillop.js
+++ b/jstests/shellkillop.js
@@ -1,65 +1,61 @@
-baseName = "jstests_shellkillop";
-
-// 'retry' should be set to true in contexts where an exception should cause the test to be retried rather than to fail.
-retry = false;
-
-function testShellAutokillop() {
-
-if (_isWindows()) {
- print("shellkillop.js not testing on windows, as functionality is missing there");
- print("shellkillop.js see http://jira.mongodb.org/browse/SERVER-1451");
-}
-else {
- db[baseName].drop();
-
- print("shellkillop.js insert data");
- for (i = 0; i < 100000; ++i) {
- db[baseName].insert({ i: 1 });
- }
- assert.eq(100000, db[baseName].count());
-
- // mongo --autokillop suppressed the ctrl-c "do you want to kill current operation" message
- // it's just for testing purposes and thus not in the shell help
- var evalStr = "print('SKO subtask started'); db." + baseName + ".update( {}, {$set:{i:'abcdefghijkl'}}, false, true ); db." + baseName + ".count();";
- print("shellkillop.js evalStr:" + evalStr);
- spawn = startMongoProgramNoConnect("mongo", "--autokillop", "--port", myPort(), "--eval", evalStr);
-
- sleep(100);
- retry = true;
- assert(db[baseName].find({ i: 'abcdefghijkl' }).count() < 100000, "update ran too fast, test won't be valid");
- retry = false;
-
- stopMongoProgramByPid(spawn);
-
- sleep(100);
-
- print("count abcdefghijkl:" + db[baseName].find({ i: 'abcdefghijkl' }).count());
-
- var inprog = db.currentOp().inprog;
- for (i in inprog) {
- if (inprog[i].ns == "test." + baseName)
- throw "shellkillop.js op is still running: " + tojson( inprog[i] );
- }
-
- retry = true;
- assert(db[baseName].find({ i: 'abcdefghijkl' }).count() < 100000, "update ran too fast, test was not valid");
- retry = false;
-}
-
-}
-
-for( var nTries = 0; nTries < 10 && retry; ++nTries ) {
- try {
- testShellAutokillop();
- } catch (e) {
- if ( !retry ) {
- throw e;
- }
- printjson( e );
- print( "retrying..." );
- }
-}
-
-assert( !retry, "retried too many times" );
-
-print("shellkillop.js SUCCESS");
+baseName = "jstests_shellkillop";
+
+// 'retry' should be set to true in contexts where an exception should cause the test to be retried rather than to fail.
+retry = false;
+
+function testShellAutokillop() {
+
+if (true) { // toggle to disable test
+ db[baseName].drop();
+
+ print("shellkillop.js insert data");
+ for (i = 0; i < 100000; ++i) {
+ db[baseName].insert({ i: 1 });
+ }
+ assert.eq(100000, db[baseName].count());
+
+ // mongo --autokillop suppressed the ctrl-c "do you want to kill current operation" message
+ // it's just for testing purposes and thus not in the shell help
+ var evalStr = "print('SKO subtask started'); db." + baseName + ".update( {}, {$set:{i:'abcdefghijkl'}}, false, true ); db." + baseName + ".count();";
+ print("shellkillop.js evalStr:" + evalStr);
+ spawn = startMongoProgramNoConnect("mongo", "--autokillop", "--port", myPort(), "--eval", evalStr);
+
+ sleep(100);
+ retry = true;
+ assert(db[baseName].find({ i: 'abcdefghijkl' }).count() < 100000, "update ran too fast, test won't be valid");
+ retry = false;
+
+ stopMongoProgramByPid(spawn);
+
+ sleep(100);
+
+ print("count abcdefghijkl:" + db[baseName].find({ i: 'abcdefghijkl' }).count());
+
+ var inprog = db.currentOp().inprog;
+ for (i in inprog) {
+ if (inprog[i].ns == "test." + baseName)
+ throw "shellkillop.js op is still running: " + tojson( inprog[i] );
+ }
+
+ retry = true;
+ assert(db[baseName].find({ i: 'abcdefghijkl' }).count() < 100000, "update ran too fast, test was not valid");
+ retry = false;
+}
+
+}
+
+for( var nTries = 0; nTries < 10 && retry; ++nTries ) {
+ try {
+ testShellAutokillop();
+ } catch (e) {
+ if ( !retry ) {
+ throw e;
+ }
+ printjson( e );
+ print( "retrying..." );
+ }
+}
+
+assert( !retry, "retried too many times" );
+
+print("shellkillop.js SUCCESS");
diff --git a/jstests/shellspawn.js b/jstests/shellspawn.js
index 6b713f8..4f550b9 100644
--- a/jstests/shellspawn.js
+++ b/jstests/shellspawn.js
@@ -9,8 +9,10 @@ if ( typeof( _startMongoProgram ) == "undefined" ){
}
else {
spawn = startMongoProgramNoConnect( "mongo", "--port", myPort(), "--eval", "sleep( 2000 ); db.getCollection( '" + baseName + "' ).save( {a:1} );" );
-
- assert.soon( function() { return 1 == t.count(); } );
+
+// assert.soon( function() { return 1 == t.count(); } );
+ // SERVER-2784 debugging - error message overwritten to indicate last count value.
+ assert.soon( "count = t.count(); msg = 'did not reach expected count, last value: ' + t.count(); 1 == count;" );
stopMongoProgramByPid( spawn );
diff --git a/jstests/skip1.js b/jstests/skip1.js
new file mode 100644
index 0000000..c620fb0
--- /dev/null
+++ b/jstests/skip1.js
@@ -0,0 +1,15 @@
+// SERVER-2845 When skipping objects without loading them, they shouldn't be
+// included in the nscannedObjects count.
+
+if ( 0 ) { // SERVER-2845
+t = db.jstests_skip1;
+t.drop();
+
+t.ensureIndex( {a:1} );
+t.save( {a:5} );
+t.save( {a:5} );
+t.save( {a:5} );
+
+assert.eq( 3, t.find( {a:5} ).skip( 2 ).explain().nscanned );
+assert.eq( 1, t.find( {a:5} ).skip( 2 ).explain().nscannedObjects );
+} \ No newline at end of file
diff --git a/jstests/slowNightly/background.js b/jstests/slowNightly/background.js
new file mode 100644
index 0000000..d1d0047
--- /dev/null
+++ b/jstests/slowNightly/background.js
@@ -0,0 +1,51 @@
+// background indexing test during inserts.
+
+assert( db.getName() == "test" );
+
+t = db.bg1;
+t.drop();
+
+var a = new Mongo( db.getMongo().host ).getDB( db.getName() );
+
+for( var i = 0; i < 100000; i++ ) {
+ t.insert({y:'aaaaaaaaaaaa',i:i});
+ if( i % 10000 == 0 ) {
+ db.getLastError();
+ print(i);
+ }
+}
+
+//db.getLastError();
+
+// start bg indexing
+a.system.indexes.insert({ns:"test.bg1", key:{i:1}, name:"i_1", background:true});
+
+// add more data
+
+for( var i = 0; i < 100000; i++ ) {
+ t.insert({i:i});
+ if( i % 10000 == 0 ) {
+ printjson( db.currentOp() );
+ db.getLastError();
+ print(i);
+ }
+}
+
+printjson( db.getLastErrorObj() );
+
+printjson( db.currentOp() );
+
+for( var i = 0; i < 40; i++ ) {
+ if( db.currentOp().inprog.length == 0 )
+ break;
+ print("waiting");
+ sleep(1000);
+}
+
+printjson( a.getLastErrorObj() );
+
+var idx = t.getIndexes();
+// print("indexes:");
+// printjson(idx);
+
+assert( idx[1].key.i == 1 );
diff --git a/jstests/slowNightly/command_line_parsing.js b/jstests/slowNightly/command_line_parsing.js
index 38c7324..ba7b136 100644
--- a/jstests/slowNightly/command_line_parsing.js
+++ b/jstests/slowNightly/command_line_parsing.js
@@ -7,3 +7,15 @@ var baseName = "jstests_slowNightly_command_line_parsing";
var m = startMongod( "--port", port, "--dbpath", "/data/db/" + baseName, "--notablescan" );
m.getDB( baseName ).getCollection( baseName ).save( {a:1} );
assert.throws( function() { m.getDB( baseName ).getCollection( baseName ).find( {a:1} ).toArray() } );
+
+// test config file
+var m2 = startMongod( "--port", port+2, "--dbpath", "/data/db/" + baseName +"2", "--config", "jstests/libs/testconfig");
+var m2result = {
+ "parsed" : {
+ "config" : "jstests/libs/testconfig",
+ "dbpath" : "/data/db/jstests_slowNightly_command_line_parsing2",
+ "fastsync" : "true",
+ "port" : 31002
+ }
+};
+assert( friendlyEqual(m2result.parsed, m2.getDB("admin").runCommand( "getCmdLineOpts" ).parsed) );
diff --git a/jstests/slowNightly/dur_big_atomic_update.js b/jstests/slowNightly/dur_big_atomic_update.js
index ffb0d83..800b4b8 100644
--- a/jstests/slowNightly/dur_big_atomic_update.js
+++ b/jstests/slowNightly/dur_big_atomic_update.js
@@ -23,6 +23,23 @@ err = d.getLastErrorObj();
assert(err.err == null);
assert(err.n == 1024);
+d.dropDatabase();
+
+for (var i=0; i<1024; i++){
+ d.foo.insert({_id:i});
+}
+
+// Do it again but in a db.eval
+d.eval(
+ function(host, big_string) {
+ new Mongo(host).getDB("test").foo.update({}, {$set: {big_string: big_string}}, false, /*multi*/true)
+ }, conn.host, big_string); // Can't pass in connection or DB objects
+
+err = d.getLastErrorObj();
+
+assert(err.err == null);
+assert(err.n == 1024);
+
// free up space
d.dropDatabase();
diff --git a/jstests/slowNightly/dur_remove_old_journals.js b/jstests/slowNightly/dur_remove_old_journals.js
index 3c57c12..1e81bee 100644
--- a/jstests/slowNightly/dur_remove_old_journals.js
+++ b/jstests/slowNightly/dur_remove_old_journals.js
@@ -33,20 +33,19 @@ sleep(sleepSecs*1000);
files = listFiles(PATH + "/journal")
printjson(files);
-
-var nfiles = 0;
-files.forEach(function (file) {
- assert.eq('string', typeof (file.name)); // sanity checking
- if (/prealloc/.test(file.name)) {
- ;
- }
- else {
- nfiles++;
- assert(!(/j\._[01]/.test(file.name)), "Old journal file still exists: " + file.name);
- }
-})
-
-assert.eq(2, nfiles); // j._2 and lsn
+
+var nfiles = 0;
+files.forEach(function (file) {
+ assert.eq('string', typeof (file.name)); // sanity checking
+ if (/prealloc/.test(file.name)) {
+ ;
+ }
+ else {
+ nfiles++;
+ }
+})
+
+assert.eq(2, nfiles); // latest journal file and lsn
stopMongod(30001);
diff --git a/jstests/slowNightly/geo_axis_aligned.js b/jstests/slowNightly/geo_axis_aligned.js
new file mode 100644
index 0000000..0161ecc
--- /dev/null
+++ b/jstests/slowNightly/geo_axis_aligned.js
@@ -0,0 +1,108 @@
+// Axis aligned circles - hard-to-find precision errors possible with exact distances here
+
+t = db.axisaligned
+t.drop();
+
+scale = [ 1, 10, 1000, 10000 ]
+bits = [ 2, 3, 4, 5, 6, 7, 8, 9 ]
+radius = [ 0.0001, 0.001, 0.01, 0.1 ]
+center = [ [ 5, 52 ], [ 6, 53 ], [ 7, 54 ], [ 8, 55 ], [ 9, 56 ] ]
+
+bound = []
+for( var j = 0; j < center.length; j++ ) bound.push( [-180, 180] );
+
+// Scale all our values to test different sizes
+radii = []
+centers = []
+bounds = []
+
+for( var s = 0; s < scale.length; s++ ){
+ for ( var i = 0; i < radius.length; i++ ) {
+ radii.push( radius[i] * scale[s] )
+ }
+
+ for ( var j = 0; j < center.length; j++ ) {
+ centers.push( [ center[j][0] * scale[s], center[j][1] * scale[s] ] )
+ bounds.push( [ bound[j][0] * scale[s], bound[j][1] * scale[s] ] )
+ }
+
+}
+
+radius = radii
+center = centers
+bound = bounds
+
+
+for ( var b = 0; b < bits.length; b++ ) {
+
+
+ printjson( radius )
+ printjson( centers )
+
+ for ( var i = 0; i < radius.length; i++ ) {
+ for ( var j = 0; j < center.length; j++ ) {
+
+ printjson( { center : center[j], radius : radius[i], bits : bits[b] } );
+
+ t.drop()
+
+ // Make sure our numbers are precise enough for this test
+ if( (center[j][0] - radius[i] == center[j][0]) || (center[j][1] - radius[i] == center[j][1]) )
+ continue;
+
+ t.save( { "_id" : 1, "loc" : { "x" : center[j][0] - radius[i], "y" : center[j][1] } } );
+ t.save( { "_id" : 2, "loc" : { "x" : center[j][0], "y" : center[j][1] } } );
+ t.save( { "_id" : 3, "loc" : { "x" : center[j][0] + radius[i], "y" : center[j][1] } } );
+ t.save( { "_id" : 4, "loc" : { "x" : center[j][0], "y" : center[j][1] + radius[i] } } );
+ t.save( { "_id" : 5, "loc" : { "x" : center[j][0], "y" : center[j][1] - radius[i] } } );
+ t.save( { "_id" : 6, "loc" : { "x" : center[j][0] - radius[i], "y" : center[j][1] + radius[i] } } );
+ t.save( { "_id" : 7, "loc" : { "x" : center[j][0] + radius[i], "y" : center[j][1] + radius[i] } } );
+ t.save( { "_id" : 8, "loc" : { "x" : center[j][0] - radius[i], "y" : center[j][1] - radius[i] } } );
+ t.save( { "_id" : 9, "loc" : { "x" : center[j][0] + radius[i], "y" : center[j][1] - radius[i] } } );
+
+ t.ensureIndex( { loc : "2d" }, { max : bound[j][1], min : bound[j][0], bits : bits[b] } );
+
+ if( db.getLastError() ) continue;
+
+ print( "DOING WITHIN QUERY ")
+ r = t.find( { "loc" : { "$within" : { "$center" : [ center[j], radius[i] ] } } } );
+
+ //printjson( r.toArray() );
+
+ assert.eq( 5, r.count() );
+
+ // FIXME: surely code like this belongs in utils.js.
+ a = r.toArray();
+ x = [];
+ for ( k in a )
+ x.push( a[k]["_id"] )
+ x.sort()
+ assert.eq( [ 1, 2, 3, 4, 5 ], x );
+
+ print( " DOING NEAR QUERY ")
+ //printjson( center[j] )
+ r = t.find( { loc : { $near : center[j], $maxDistance : radius[i] } }, { _id : 1 } )
+ assert.eq( 5, r.count() );
+
+ print( " DOING DIST QUERY ")
+
+ a = db.runCommand({ geoNear : "axisaligned", near : center[j], maxDistance : radius[i] }).results
+ assert.eq( 5, a.length );
+
+ //printjson( a );
+
+ var distance = 0;
+ for( var k = 0; k < a.length; k++ ){
+ //print( a[k].dis )
+ //print( distance )
+ assert.gte( a[k].dis, distance );
+ //printjson( a[k].obj )
+ //print( distance = a[k].dis );
+ }
+
+ r = t.find( { loc : { $within : { $box : [ [ center[j][0] - radius[i], center[j][1] - radius[i] ], [ center[j][0] + radius[i], center[j][1] + radius[i] ] ] } } }, { _id : 1 } )
+ assert.eq( 9, r.count() );
+
+ }
+ }
+} \ No newline at end of file
diff --git a/jstests/slowNightly/geo_mnypts.js b/jstests/slowNightly/geo_mnypts.js
new file mode 100644
index 0000000..ac40651
--- /dev/null
+++ b/jstests/slowNightly/geo_mnypts.js
@@ -0,0 +1,51 @@
+// Test sanity of geo queries with a lot of points
+
+var coll = db.testMnyPts
+coll.drop()
+
+var totalPts = 500 * 1000
+
+// Add points in a 100x100 grid
+for( var i = 0; i < totalPts; i++ ){
+ var ii = i % 10000
+ coll.insert({ loc : [ ii % 100, Math.floor( ii / 100 ) ] })
+}
+
+coll.ensureIndex({ loc : "2d" })
+
+// Check that quarter of points in each quadrant
+for( var i = 0; i < 4; i++ ){
+ var x = i % 2
+ var y = Math.floor( i / 2 )
+
+ var box = [[0, 0], [49, 49]]
+ box[0][0] += ( x == 1 ? 50 : 0 )
+ box[1][0] += ( x == 1 ? 50 : 0 )
+ box[0][1] += ( y == 1 ? 50 : 0 )
+ box[1][1] += ( y == 1 ? 50 : 0 )
+
+ assert.eq( totalPts / 4, coll.find({ loc : { $within : { $box : box } } }).count() )
+ assert.eq( totalPts / 4, coll.find({ loc : { $within : { $box : box } } }).itcount() )
+
+}
+
+// Check that half of points in each half
+for( var i = 0; i < 2; i++ ){
+
+ var box = [[0, 0], [49, 99]]
+ box[0][0] += ( i == 1 ? 50 : 0 )
+ box[1][0] += ( i == 1 ? 50 : 0 )
+
+ assert.eq( totalPts / 2, coll.find({ loc : { $within : { $box : box } } }).count() )
+ assert.eq( totalPts / 2, coll.find({ loc : { $within : { $box : box } } }).itcount() )
+
+}
+
+// Check that all but corner set of points in radius
+var circle = [[0, 0], (100 - 1) * Math.sqrt( 2 ) - 0.25 ]
+
+assert.eq( totalPts - totalPts / ( 100 * 100 ), coll.find({ loc : { $within : { $center : circle } } }).count() )
+assert.eq( totalPts - totalPts / ( 100 * 100 ), coll.find({ loc : { $within : { $center : circle } } }).itcount() )
+
+
+
diff --git a/jstests/slowNightly/geo_polygon.js b/jstests/slowNightly/geo_polygon.js
new file mode 100644
index 0000000..25bf026
--- /dev/null
+++ b/jstests/slowNightly/geo_polygon.js
@@ -0,0 +1,53 @@
+t = db.geo_polygon4;
+t.drop();
+
+shouldRun = true;
+
+bi = db.adminCommand( "buildinfo" ).sysInfo
+if ( bi.indexOf( "erh2" ) >= 0 ){
+ // this machine runs this test very slowly
+ // it seems to be related to osx 10.5
+ // if this machine gets upgraded, we should remove this check
+ // the os x debug builders still run thistest, so i'm not worried about it
+ shouldRun = false;
+}
+
+if ( shouldRun ) {
+
+ num = 0;
+ for ( x = -180; x < 180; x += .5 ){
+ for ( y = -180; y < 180; y += .5 ){
+ o = { _id : num++ , loc : [ x , y ] };
+ t.save( o );
+ }
+ }
+
+ var numTests = 31;
+ for( var n = 0; n < numTests; n++ ){
+ t.dropIndexes()
+ t.ensureIndex( { loc : "2d" }, { bits : 2 + n } );
+
+ assert.between( 9 - 2 , t.find( { loc: { "$within": { "$polygon" : [[0,0], [1,1], [0,2]] }}} ).count() , 9, "Triangle Test", true);
+ assert.eq( num , t.find( { loc : { "$within" : { "$polygon" : [ [-180,-180], [-180,180], [180,180], [180,-180] ] } } } ).count() , "Bounding Box Test" );
+
+ assert.eq( 441 , t.find( { loc : { "$within" : { "$polygon" : [ [0,0], [0,10], [10,10], [10,0] ] } } } ).count() , "Square Test" );
+ assert.eq( 25 , t.find( { loc : { "$within" : { "$polygon" : [ [0,0], [0,2], [2,2], [2,0] ] } } } ).count() , "Square Test 2" );
+
+ if(1){ // SERVER-3726
+ // Points exactly on diagonals may be in or out, depending on how the error calculating the slope falls.
+ assert.between( 341 - 18 , t.find( { loc : { "$within" : { "$polygon" : [ [0,0], [0,10], [10,10], [10,0], [5,5] ] } } } ).count(), 341, "Square Missing Chunk Test", true );
+ assert.between( 21 - 2 , t.find( { loc : { "$within" : { "$polygon" : [ [0,0], [0,2], [2,2], [2,0], [1,1] ] } } } ).count(), 21 , "Square Missing Chunk Test 2", true );
+ }
+
+ assert.eq( 1 , t.find( { loc: { "$within": { "$polygon" : [[0,0], [0,0], [0,0]] }}} ).count() , "Point Test" );
+
+ // SERVER-3725
+ {
+ assert.eq( 5 , t.find( { loc: { "$within": { "$polygon" : [[0,0], [1,0], [2,0]] }}} ).count() , "Line Test 1" );
+ assert.eq( 3 , t.find( { loc: { "$within": { "$polygon" : [[0,0], [0,0], [1,0]] }}} ).count() , "Line Test 2" );
+ assert.eq( 5 , t.find( { loc: { "$within": { "$polygon" : [[0,2], [0,1], [0,0]] }}} ).count() , "Line Test 3" );
+ }
+
+ assert.eq( 3 , t.find( { loc: { "$within": { "$polygon" : [[0,1], [0,0], [0,0]] }}} ).count() , "Line Test 4" );
+ }
+}
diff --git a/jstests/slowNightly/index_check10.js b/jstests/slowNightly/index_check10.js
new file mode 100644
index 0000000..be94be2
--- /dev/null
+++ b/jstests/slowNightly/index_check10.js
@@ -0,0 +1,133 @@
+// Randomized index testing with initial btree constructed using btree builder.
+// Also uses large strings.
+
+Random.setRandomSeed();
+
+t = db.test_index_check10;
+
+function doIt( indexVersion ) {
+
+ t.drop();
+
+ function sort() {
+ var sort = {};
+ for( var i = 0; i < n; ++i ) {
+ sort[ fields[ i ] ] = Random.rand() > 0.5 ? 1 : -1;
+ }
+ return sort;
+ }
+
+ var fields = [ 'a', 'b', 'c', 'd', 'e' ];
+ n = Random.randInt( 5 ) + 1;
+ var idx = sort();
+
+ var chars = "abcdefghijklmnopqrstuvwxyz";
+
+ function obj() {
+ var ret = {};
+ for( var i = 0; i < n; ++i ) {
+ ret[ fields[ i ] ] = r();
+ }
+ return ret;
+ }
+
+ function r() {
+ var len = Random.randInt( 1000 / n );
+ buf = "";
+ for( var i = 0; i < len; ++i ) {
+ buf += chars.charAt( Random.randInt( chars.length ) );
+ }
+ return buf;
+ }
+
+ function check() {
+ var v = t.validate();
+ if ( !t.valid ) {
+ printjson( t );
+ assert( t.valid );
+ }
+ var spec = {};
+ for( var i = 0; i < n; ++i ) {
+ if ( Random.rand() > 0.5 ) {
+ var bounds = [ r(), r() ];
+ if ( bounds[ 0 ] > bounds[ 1 ] ) {
+ bounds.reverse();
+ }
+ var s = {};
+ if ( Random.rand() > 0.5 ) {
+ s[ "$gte" ] = bounds[ 0 ];
+ } else {
+ s[ "$gt" ] = bounds[ 0 ];
+ }
+ if ( Random.rand() > 0.5 ) {
+ s[ "$lte" ] = bounds[ 1 ];
+ } else {
+ s[ "$lt" ] = bounds[ 1 ];
+ }
+ spec[ fields[ i ] ] = s;
+ } else {
+ var vals = []
+ for( var j = 0; j < Random.randInt( 15 ); ++j ) {
+ vals.push( r() );
+ }
+ spec[ fields[ i ] ] = { $in: vals };
+ }
+ }
+ s = sort();
+ c1 = t.find( spec, { _id:null } ).sort( s ).hint( idx ).toArray();
+ try {
+ c3 = t.find( spec, { _id:null } ).sort( s ).hint( {$natural:1} ).toArray();
+ } catch( e ) {
+ // may assert if too much data for in memory sort
+ print( "retrying check..." );
+ check(); // retry with different bounds
+ return;
+ }
+
+ var j = 0;
+ for( var i = 0; i < c3.length; ++i ) {
+ if( friendlyEqual( c1[ j ], c3[ i ] ) ) {
+ ++j;
+ } else {
+ var o = c3[ i ];
+ var size = Object.bsonsize( o );
+ for( var f in o ) {
+ size -= f.length;
+ }
+
+ var max = indexVersion == 0 ? 819 : 818;
+
+ if ( size <= max /* KeyMax */ ) {
+ assert.eq( c1, c3 , "size: " + size );
+ }
+ }
+ }
+ }
+
+ for( var i = 0; i < 10000; ++i ) {
+ t.save( obj() );
+ }
+
+ t.ensureIndex( idx , { v : indexVersion } );
+ check();
+
+ for( var i = 0; i < 10000; ++i ) {
+ if ( Random.rand() > 0.9 ) {
+ t.save( obj() );
+ } else {
+ t.remove( obj() ); // improve
+ }
+ if( Random.rand() > 0.999 ) {
+ print( i );
+ check();
+ }
+ }
+
+ check();
+
+}
+
+for( var z = 0; z < 5; ++z ) {
+ var indexVersion = z % 2;
+ doIt( indexVersion );
+}
diff --git a/jstests/slowNightly/index_check9.js b/jstests/slowNightly/index_check9.js
index 6634d06..33ce0a6 100644
--- a/jstests/slowNightly/index_check9.js
+++ b/jstests/slowNightly/index_check9.js
@@ -1,3 +1,5 @@
+// Randomized index testing
+
Random.setRandomSeed();
t = db.test_index_check9;
diff --git a/jstests/slowNightly/replReads.js b/jstests/slowNightly/replReads.js
new file mode 100644
index 0000000..4fe9130
--- /dev/null
+++ b/jstests/slowNightly/replReads.js
@@ -0,0 +1,108 @@
+// Test that doing slaveOk reads from secondaries hits all the secondaries evenly
+
+function testReadLoadBalancing(numReplicas) {
+
+ s = new ShardingTest( "replReads" , 1 /* numShards */, 0 /* verboseLevel */, 1 /* numMongos */, { rs : true , numReplicas : numReplicas, chunksize : 1 } )
+
+ s.adminCommand({enablesharding : "test"})
+ s.config.settings.find().forEach(printjson)
+
+ s.adminCommand({shardcollection : "test.foo", key : {_id : 1}})
+
+ s.getDB("test").foo.insert({a : 123})
+
+ primary = s._rs[0].test.liveNodes.master
+ secondaries = s._rs[0].test.liveNodes.slaves
+
+ function rsStats() {
+ return s.getDB( "admin" ).runCommand( "connPoolStats" )["replicaSets"]["replReads-rs0"];
+ }
+
+ assert.eq( numReplicas , rsStats().hosts.length );
+
+ function isMasterOrSecondary( info ){
+ if ( ! info.ok )
+ return false;
+ if ( info.ismaster )
+ return true;
+ return info.secondary && ! info.hidden;
+ }
+
+ assert.soon(
+ function() {
+ var x = rsStats().hosts;
+ printjson(x)
+ for ( var i=0; i<x.length; i++ )
+ if ( ! isMasterOrSecondary( x[i] ) )
+ return false;
+ return true;
+ }
+ );
+
+ for (var i = 0; i < secondaries.length; i++) {
+ assert.soon( function(){ return secondaries[i].getDB("test").foo.count() > 0; } )
+ secondaries[i].getDB('test').setProfilingLevel(2)
+ }
+
+ for (var i = 0; i < secondaries.length * 10; i++) {
+ conn = new Mongo(s._mongos[0].host)
+ conn.setSlaveOk()
+ conn.getDB('test').foo.findOne()
+ }
+
+ for (var i = 0; i < secondaries.length; i++) {
+ var profileCollection = secondaries[i].getDB('test').system.profile;
+ assert.eq(10, profileCollection.find().count(), "Wrong number of read queries sent to secondary " + i + " " + tojson( profileCollection.find().toArray() ))
+ }
+
+ db = primary.getDB( "test" );
+
+ printjson(rs.status());
+ c = rs.conf();
+ print( "config before: " + tojson(c) );
+ for ( i=0; i<c.members.length; i++ ) {
+ if ( c.members[i].host == db.runCommand( "ismaster" ).primary )
+ continue;
+ c.members[i].hidden = true;
+ c.members[i].priority = 0;
+ break;
+ }
+ rs.reconfig( c );
+ print( "config after: " + tojson( rs.conf() ) );
+
+ assert.soon(
+ function() {
+ var x = rsStats();
+ printjson(x);
+ var numOk = 0;
+ for ( var i=0; i<x.hosts.length; i++ )
+ if ( x.hosts[i].hidden )
+ return true;
+ return false;
+ } , "one slave not ok" , 180000 , 5000
+ );
+
+ for (var i = 0; i < secondaries.length * 10; i++) {
+ conn = new Mongo(s._mongos[0].host)
+ conn.setSlaveOk()
+ conn.getDB('test').foo.findOne()
+ }
+
+ var counts = []
+ for (var i = 0; i < secondaries.length; i++) {
+ var profileCollection = secondaries[i].getDB('test').system.profile;
+ counts.push( profileCollection.find().count() );
+ }
+
+ counts = counts.sort();
+ assert.eq( 20 , counts[1] - counts[0] , "counts wrong: " + tojson( counts ) );
+
+ s.stop()
+}
+
+//for (var i = 1; i < 10; i++) {
+// testReadLoadBalancing(i)
+//}
+
+// Is there a way that this can be run multiple times with different values?
+testReadLoadBalancing(3)
diff --git a/jstests/slowNightly/replsets_priority1.js b/jstests/slowNightly/replsets_priority1.js
new file mode 100644
index 0000000..3eef5cf
--- /dev/null
+++ b/jstests/slowNightly/replsets_priority1.js
@@ -0,0 +1,173 @@
+// come up with random priorities and make sure that the right member gets
+// elected. then kill that member and make sure then next one gets elected.
+
+load("jstests/replsets/rslib.js");
+
+var rs = new ReplSetTest( {name: 'testSet', nodes: 3} );
+var nodes = rs.startSet();
+rs.initiate();
+
+var master = rs.getMaster();
+
+var everyoneOkSoon = function() {
+ var status;
+ assert.soon(function() {
+ var ok = true;
+ status = master.adminCommand({replSetGetStatus : 1});
+
+ if (!status.members) {
+ return false;
+ }
+
+ for (var i in status.members) {
+ if (status.members[i].health == 0) {
+ continue;
+ }
+ ok &= status.members[i].state == 1 || status.members[i].state == 2;
+ }
+ return ok;
+ }, tojson(status));
+};
+
+var checkPrimaryIs = function(node) {
+ var status;
+
+ assert.soon(function() {
+ var ok = true;
+
+ try {
+ status = master.adminCommand({replSetGetStatus : 1});
+ }
+ catch(e) {
+ print(e);
+ reconnect(master);
+ status = master.adminCommand({replSetGetStatus : 1});
+ }
+
+ var str = "goal: "+node.host+"==1 states: ";
+ if (!status || !status.members) {
+ return false;
+ }
+ status.members.forEach( function(m) {
+ str += m.name + ": "+m.state +" ";
+
+ if (m.name == node.host) {
+ ok &= m.state == 1;
+ }
+ else {
+ ok &= m.state != 1 || (m.state == 1 && m.health == 0);
+ }
+ });
+ print(str);
+
+ occasionally(function() {
+ printjson(status);
+ }, 15);
+
+ return ok;
+ }, node.host+'==1', 60000, 1000);
+
+ everyoneOkSoon();
+};
+
+everyoneOkSoon();
+
+// initial sync
+master.getDB("foo").bar.insert({x:1});
+rs.awaitReplication();
+
+print("starting loop");
+
+var n = 5;
+for (i=0; i<n; i++) {
+ print("Round "+i+": FIGHT!");
+
+ var max = null;
+ var second = null;
+ reconnect(master);
+ var config = master.getDB("local").system.replset.findOne();
+
+ var version = config.version;
+ config.version++;
+
+ for (var j=0; j<config.members.length; j++) {
+ var priority = Math.random()*100;
+ config.members[j].priority = priority;
+
+ if (!max || priority > max.priority) {
+ max = config.members[j];
+ }
+ }
+
+ for (var j=0; j<config.members.length; j++) {
+ if (config.members[j] == max) {
+ continue;
+ }
+ if (!second || config.members[j].priority > second.priority) {
+ second = config.members[j];
+ }
+ }
+
+ print("max is "+max.host+" with priority "+max.priority+", reconfiguring...");
+
+ var count = 0;
+ while (config.version != version && count < 100) {
+ reconnect(master);
+
+ occasionally(function() {
+ print("version is "+version+", trying to update to "+config.version);
+ });
+
+ try {
+ master.adminCommand({replSetReconfig : config});
+ master = rs.getMaster();
+ reconnect(master);
+
+ version = master.getDB("local").system.replset.findOne().version;
+ }
+ catch (e) {
+ print("Caught exception: "+e);
+ }
+
+ count++;
+ }
+
+ assert.soon(function() {
+ rs.getMaster();
+ return rs.liveNodes.slaves.length == 2;
+ }, "2 slaves");
+
+ assert.soon(function() {
+ versions = [0,0];
+ rs.liveNodes.slaves[0].setSlaveOk();
+ versions[0] = rs.liveNodes.slaves[0].getDB("local").system.replset.findOne().version;
+ rs.liveNodes.slaves[1].setSlaveOk();
+ versions[1] = rs.liveNodes.slaves[1].getDB("local").system.replset.findOne().version;
+ return versions[0] == config.version && versions[1] == config.version;
+ });
+
+ // the reconfiguration needs to be replicated! the hb sends it out
+ // separately from the repl
+ rs.awaitReplication();
+
+ print("reconfigured. Checking statuses.");
+
+ checkPrimaryIs(max);
+
+ rs.stop(max._id);
+
+ var master = rs.getMaster();
+
+ print("killed max primary. Checking statuses.");
+
+ print("second is "+second.host+" with priority "+second.priority);
+ checkPrimaryIs(second);
+
+ rs.restart(max._id);
+ master = rs.getMaster();
+
+ print("Restarted max. Checking statuses.");
+ checkPrimaryIs(max);
+}
+
+print("priority1.js SUCCESS!");
diff --git a/jstests/slowNightly/sharding_balance1.js b/jstests/slowNightly/sharding_balance1.js
index 9379c4f..c50148c 100644
--- a/jstests/slowNightly/sharding_balance1.js
+++ b/jstests/slowNightly/sharding_balance1.js
@@ -41,7 +41,8 @@ print( diff() )
assert.soon( function(){
var d = diff();
return d < 5;
-} , "balance didn't happen" , 1000 * 60 * 3 , 5000 );
+// Make sure there's enough time here, since balancing can sleep for 15s or so between balances.
+} , "balance didn't happen" , 1000 * 60 * 5 , 5000 );
var chunkCount = sum();
s.adminCommand( { removeshard: "shard0000" } );
diff --git a/jstests/slowNightly/sharding_balance4.js b/jstests/slowNightly/sharding_balance4.js
index c7f76dd..5288bda 100644
--- a/jstests/slowNightly/sharding_balance4.js
+++ b/jstests/slowNightly/sharding_balance4.js
@@ -90,8 +90,12 @@ function diff(){
if ( le.err )
print( "ELIOT ELIOT : " + tojson( le ) + "\t" + myid );
- assert( le.updatedExisting , "GLE diff 1 myid: " + myid + " " + tojson(le) )
- assert.eq( 1 , le.n , "GLE diff 2 myid: " + myid + " " + tojson(le) )
+ if ( ! le.updatedExisting || le.n != 1 ) {
+ print( "going to assert for id: " + myid + " correct count is: " + counts[myid] + " db says count is: " + db.foo.findOne( { _id : myid } ) );
+ }
+
+ assert( le.updatedExisting , "GLE diff myid: " + myid + " 1: " + tojson(le) )
+ assert.eq( 1 , le.n , "GLE diff myid: " + myid + " 2: " + tojson(le) )
if ( Math.random() > .99 ){
diff --git a/jstests/slowNightly/sharding_migrateBigObject.js b/jstests/slowNightly/sharding_migrateBigObject.js
new file mode 100644
index 0000000..5ad9ed1
--- /dev/null
+++ b/jstests/slowNightly/sharding_migrateBigObject.js
@@ -0,0 +1,61 @@
+
+var shardA = startMongodEmpty("--shardsvr", "--port", 30001, "--dbpath", "/data/migrateBigger0");
+var shardB = startMongodEmpty("--shardsvr", "--port", 30002, "--dbpath", "/data/migrateBigger1");
+var config = startMongodEmpty("--configsvr", "--port", 29999, "--dbpath", "/data/migrateBiggerC");
+
+var mongos = startMongos("--port", 30000, "--configdb", "localhost:29999")
+
+var admin = mongos.getDB("admin")
+
+admin.runCommand({ addshard : "localhost:30001" })
+admin.runCommand({ addshard : "localhost:30002" })
+
+db = mongos.getDB("test");
+var coll = db.getCollection("stuff")
+
+var data = "x"
+var nsq = 16
+var n = 255
+
+for( var i = 0; i < nsq; i++ ) data += data
+
+dataObj = {}
+for( var i = 0; i < n; i++ ) dataObj["data-" + i] = data
+
+for( var i = 0; i < 40; i++ ) {
+ if(i != 0 && i % 10 == 0) printjson( coll.stats() )
+ coll.save({ data : dataObj })
+}
+db.getLastError();
+
+assert.eq( 40 , coll.count() , "prep1" );
+
+printjson( coll.stats() )
+
+admin.runCommand({ enablesharding : "" + coll.getDB() })
+
+admin.printShardingStatus()
+
+admin.runCommand({ shardcollection : "" + coll, key : { _id : 1 } })
+
+assert.lt( 5 , mongos.getDB( "config" ).chunks.find( { ns : "test.stuff" } ).count() , "not enough chunks" );
+
+assert.soon(
+ function(){
+ res = mongos.getDB( "config" ).chunks.group( { cond : { ns : "test.stuff" } ,
+ key : { shard : 1 } ,
+ reduce : function( doc , out ){ out.nChunks++; } ,
+ initial : { nChunks : 0 } } );
+
+ printjson( res );
+ return res.length > 1 && Math.abs( res[0].nChunks - res[1].nChunks ) <= 3;
+
+ } ,
+ "never migrated" , 180000 , 1000 );
+
+stopMongod( 30000 );
+stopMongod( 29999 );
+stopMongod( 30001 );
+stopMongod( 30002 );
+
+
diff --git a/jstests/slowNightly/sharding_multiple_ns_rs.js b/jstests/slowNightly/sharding_multiple_ns_rs.js
new file mode 100644
index 0000000..3cd7b3e
--- /dev/null
+++ b/jstests/slowNightly/sharding_multiple_ns_rs.js
@@ -0,0 +1,49 @@
+
+s = new ShardingTest( "blah" , 1 /* numShards */, 1 /* verboseLevel */, 1 /* numMongos */, { rs : true , chunksize : 1 } )
+
+s.adminCommand( { enablesharding : "test" } );
+s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
+
+db = s.getDB( "test" );
+
+for ( i=0; i<100; i++ ) {
+ db.foo.insert( { _id : i , x : i } )
+ db.bar.insert( { _id : i , x : i } )
+}
+
+db.getLastError();
+
+sh.splitAt( "test.foo" , { _id : 50 } )
+
+other = new Mongo( s.s.name );
+dbother = other.getDB( "test" );
+
+assert.eq( 5 , db.foo.findOne( { _id : 5 } ).x );
+assert.eq( 5 , dbother.foo.findOne( { _id : 5 } ).x );
+
+assert.eq( 5 , db.bar.findOne( { _id : 5 } ).x );
+assert.eq( 5 , dbother.bar.findOne( { _id : 5 } ).x );
+
+
+s._rs[0].test.awaitReplication();
+
+s._rs[0].test.stopMaster( 15 , true )
+
+sleep( 20 * 1000 );
+
+assert.eq( 5 , db.foo.findOne( { _id : 5 } ).x );
+assert.eq( 5 , db.bar.findOne( { _id : 5 } ).x );
+
+s.adminCommand( { shardcollection : "test.bar" , key : { _id : 1 } } );
+sh.splitAt( "test.bar" , { _id : 50 } )
+
+yetagain = new Mongo( s.s.name )
+assert.eq( 5 , yetagain.getDB( "test" ).bar.findOne( { _id : 5 } ).x )
+assert.eq( 5 , yetagain.getDB( "test" ).foo.findOne( { _id : 5 } ).x )
+
+assert.eq( 5 , dbother.bar.findOne( { _id : 5 } ).x );
+assert.eq( 5 , dbother.foo.findOne( { _id : 5 } ).x );
+
+
+s.stop();
+
diff --git a/jstests/slowNightly/sharding_passthrough.js b/jstests/slowNightly/sharding_passthrough.js
index 81781ca..d81df68 100644
--- a/jstests/slowNightly/sharding_passthrough.js
+++ b/jstests/slowNightly/sharding_passthrough.js
@@ -1,6 +1,6 @@
-s = new ShardingTest( "sharding_passthrough" , 2 , 1 , 1 );
-s.adminCommand( { enablesharding : "test" } );
-db=s.getDB("test");
+myShardingTest = new ShardingTest( "sharding_passthrough" , 2 , 1 , 1 );
+myShardingTest.adminCommand( { enablesharding : "test" } );
+db=myShardingTest.getDB("test");
var files = listFiles("jstests");
@@ -9,7 +9,6 @@ var runnerStart = new Date()
files.forEach(
function(x) {
-// /(basic|update).*\.js$/
if ( /[\/\\]_/.test(x.name) ||
! /\.js$/.test(x.name ) ){
print(" >>>>>>>>>>>>>>> skipping " + x.name);
@@ -63,17 +62,17 @@ files.forEach(
* clean (apitest_dbcollection)
* logout and getnonce
*/
- if (/[\/\\](error3|capped.*|splitvector|apitest_db|cursor6|copydb-auth|profile1|dbhash|median|apitest_dbcollection|evalb|evald|eval_nolock|auth1|auth2|unix_socket\d*)\.js$/.test(x.name)) {
+ if (/[\/\\](error3|capped.*|splitvector|apitest_db|cursor6|copydb-auth|profile\d*|dbhash|median|apitest_dbcollection|evalb|evald|eval_nolock|auth1|auth2|dropdb_race|unix_socket\d*)\.js$/.test(x.name)) {
print(" !!!!!!!!!!!!!!! skipping test that has failed under sharding but might not anymore " + x.name)
return;
}
// These are bugs (some might be fixed now):
- if (/[\/\\](apply_ops1|count5|cursor8|or4|shellkillop|update4)\.js$/.test(x.name)) {
+ if (/[\/\\](apply_ops1|count5|cursor8|or4|shellkillop|update4|profile\d*)\.js$/.test(x.name)) {
print(" !!!!!!!!!!!!!!! skipping test that has failed under sharding but might not anymore " + x.name)
return;
}
// These aren't supposed to get run under sharding:
- if (/[\/\\](dbadmin|error1|fsync|fsync2|geo.*|indexh|remove5|update4|notablescan|check_shard_index|mr_replaceIntoDB)\.js$/.test(x.name)) {
+ if (/[\/\\](dbadmin|error1|fsync|fsync2|geo.*|indexh|remove5|update4|notablescan|compact.*|check_shard_index|bench_test.*|mr_replaceIntoDB)\.js$/.test(x.name)) {
print(" >>>>>>>>>>>>>>> skipping test that would fail under sharding " + x.name)
return;
}
@@ -89,6 +88,9 @@ files.forEach(
);
+myShardingTest.stop()
+
var runnerEnd = new Date()
print( "total runner time: " + ( ( runnerEnd.getTime() - runnerStart.getTime() ) / 1000 ) + "secs" )
+
diff --git a/jstests/slowNightly/sharding_rs1.js b/jstests/slowNightly/sharding_rs1.js
index 4ad126e..f73e690 100644
--- a/jstests/slowNightly/sharding_rs1.js
+++ b/jstests/slowNightly/sharding_rs1.js
@@ -1,6 +1,6 @@
// tests sharding with replica sets
-s = new ShardingTest( "rs1" , 3 , 1 , 2 , { rs : true , chunksize : 1 } )
+s = new ShardingTest( "rs1" , 3 /* numShards */, 1 /* verboseLevel */, 2 /* numMongos */, { rs : true , chunksize : 1 } )
s.adminCommand( { enablesharding : "test" } );
@@ -59,6 +59,12 @@ assert.soon( function(){
s.config.settings.update( { _id: "balancer" }, { $set : { stopped: true } } , true );
+sleep( 1000 );
+
+while ( sh.isBalancerRunning() ){
+ sleep( 1000 );
+}
+
for ( i=0; i<s._rs.length; i++ ){
r = s._rs[i];
r.test.awaitReplication();
diff --git a/jstests/slowNightly/sharding_rs2.js b/jstests/slowNightly/sharding_rs2.js
index cd7cf68..4de935b 100644
--- a/jstests/slowNightly/sharding_rs2.js
+++ b/jstests/slowNightly/sharding_rs2.js
@@ -155,7 +155,29 @@ assert.eq( before.query + 10 , after.query , "E3" )
assert.eq( 100 , ts.count() , "E4" )
assert.eq( 100 , ts.find().itcount() , "E5" )
printjson( ts.find().batchSize(5).explain() )
+
+before = rs.test.getMaster().adminCommand( "serverStatus" ).opcounters
+// Careful, mongos can poll the masters here too unrelated to the query,
+// resulting in this test failing sporadically if/when there's a delay here.
assert.eq( 100 , ts.find().batchSize(5).itcount() , "E6" )
+after = rs.test.getMaster().adminCommand( "serverStatus" ).opcounters
+assert.eq( before.query + before.getmore , after.query + after.getmore , "E6.1" )
+
+assert.eq( 100 , ts.find().batchSize(5).itcount() , "F1" )
+
+for ( i=0; i<10; i++ ) {
+ m = new Mongo( s.s.name );
+ m.setSlaveOk();
+ ts = m.getDB( "test" ).foo
+ assert.eq( 100 , ts.find().batchSize(5).itcount() , "F2." + i )
+}
+
+for ( i=0; i<10; i++ ) {
+ m = new Mongo( s.s.name );
+ ts = m.getDB( "test" ).foo
+ assert.eq( 100 , ts.find().batchSize(5).itcount() , "F3." + i )
+}
+
printjson( db.adminCommand( "getShardMap" ) );
diff --git a/jstests/slowNightly/sharding_rs_arb1.js b/jstests/slowNightly/sharding_rs_arb1.js
new file mode 100644
index 0000000..be4c4dc
--- /dev/null
+++ b/jstests/slowNightly/sharding_rs_arb1.js
@@ -0,0 +1,40 @@
+x = 5
+name = "sharding_rs_arb1"
+replTest = new ReplSetTest( { name : name , nodes : 3 , startPort : 31000 } );
+nodes = replTest.startSet();
+var port = replTest.ports;
+replTest.initiate({_id : name, members :
+ [
+ {_id:0, host : getHostName()+":"+port[0]},
+ {_id:1, host : getHostName()+":"+port[1]},
+ {_id:2, host : getHostName()+":"+port[2], arbiterOnly : true},
+ ],
+ });
+
+replTest.awaitReplication();
+
+master = replTest.getMaster();
+db = master.getDB( "test" );
+printjson( rs.status() );
+
+var config = startMongodEmpty("--configsvr", "--port", 29999, "--dbpath", "/data/db/" + name + "_config" );
+
+var mongos = startMongos("--port", 30000, "--configdb", getHostName() + ":29999")
+var admin = mongos.getDB("admin")
+var url = name + "/";
+for ( i=0; i<port.length; i++ ) {
+ if ( i > 0 )
+ url += ",";
+ url += getHostName() + ":" + port[i];
+}
+print( url )
+res = admin.runCommand( { addshard : url } )
+printjson( res )
+assert( res.ok , tojson(res) )
+
+
+
+stopMongod( 30000 )
+stopMongod( 29999 )
+replTest.stopSet();
+
diff --git a/jstests/slowNightly/sync6_slow.js b/jstests/slowNightly/sync6_slow.js
new file mode 100644
index 0000000..63d6123
--- /dev/null
+++ b/jstests/slowNightly/sync6_slow.js
@@ -0,0 +1,82 @@
+// More complete version of sharding/sync6.js
+// Test that distributed lock forcing does not result in inconsistencies, using a
+// fast timeout.
+
+// Note that this test will always have random factors, since we can't control the
+// thread scheduling.
+
+test = new SyncCCTest( "sync6", { logpath : "/dev/null" } )
+
+// Startup another process to handle our commands to the cluster, mostly so it's
+// easier to read.
+var commandConn = startMongodTest( 30000 + 4, "syncCommander", false, {})//{ logpath : "/dev/null" } )//{verbose : ""} )
+// { logpath : "/data/db/syncCommander/mongod.log" } );
+
+// Up the log level for this test
+commandConn.getDB( "admin" ).runCommand( { setParameter : 1, logLevel : 0 } )
+
+// Have lots of threads, so use larger i
+// Can't test too many, we get socket exceptions... possibly due to the
+// javascript console.
+// TODO: Figure out our max bounds here - use less threads now to avoid pinger starvation issues.
+for ( var t = 0; t < 4; t++ ) {
+for ( var i = 4; i < 5; i++ ) {
+
+ // Our force time is 6 seconds - slightly diff from sync6 to ensure exact time not important
+ var takeoverMS = 6000;
+
+ // Generate valid sleep and skew for this timeout
+ var threadSleepWithLock = takeoverMS / 2;
+ var configServerTimeSkew = [ 0, 0, 0 ]
+ for ( var h = 0; h < 3; h++ ) {
+ // Skew by 1/30th the takeover time either way, at max
+ configServerTimeSkew[h] = ( i + h ) % Math.floor( takeoverMS / 60 )
+ // Make skew pos or neg
+ configServerTimeSkew[h] *= ( ( i + h ) % 2 ) ? -1 : 1;
+ }
+
+ // Build command
+ command = { _testDistLockWithSkew : 1 }
+
+ // Basic test parameters
+ command["lockName"] = "TimeSkewFailNewTest_lock_" + i;
+ command["host"] = test.url
+ command["seed"] = i
+ command["numThreads"] = ( i % 50 ) + 1
+
+ // Critical values so we're sure of correct operation
+ command["takeoverMS"] = takeoverMS
+ command["wait"] = 4 * takeoverMS // so we must force the lock
+ command["skewHosts"] = configServerTimeSkew
+ command["threadWait"] = threadSleepWithLock
+
+ // Less critical test params
+
+ // 1/3 of threads will not release the lock
+ command["hangThreads"] = 3
+ // Amount of time to wait before trying lock again
+ command["threadSleep"] = 1;// ( ( i + 1 ) * 100 ) % (takeoverMS / 4)
+ // Amount of total clock skew possible between locking threads (processes)
+ // This can be large now.
+ command["skewRange"] = ( command["takeoverMS"] * 3 ) * 60 * 1000
+
+ // Double-check our sleep, host skew, and takeoverMS values again
+
+ // At maximum, our threads must sleep only half the lock timeout time.
+ assert( command["threadWait"] <= command["takeoverMS"] / 2 )
+ for ( var h = 0; h < command["skewHosts"].length; h++ ) {
+ // At maximum, our config server time skew needs to be less than 1/30th
+ // the total time skew (1/60th either way).
+ assert( Math.abs( command["skewHosts"][h] ) <= ( command["takeoverMS"] / 60 ) )
+ }
+
+ result = commandConn.getDB( "admin" ).runCommand( command )
+ printjson( result )
+ printjson( command )
+ assert( result.ok, "Skewed threads did not increment correctly." );
+
+}
+}
+
+stopMongoProgram( 30004 )
+test.stop();
diff --git a/jstests/slowWeekly/geo_full.js b/jstests/slowWeekly/geo_full.js
new file mode 100644
index 0000000..9eb1b7a
--- /dev/null
+++ b/jstests/slowWeekly/geo_full.js
@@ -0,0 +1,487 @@
+//
+// Integration test of the geo code
+//
+// Basically, this tests adds a random number of docs with a random number of points,
+// given a 2d environment of random precision which is either randomly earth-like or of
+// random bounds, and indexes these points after a random amount of points have been added
+// with a random number of additional fields which correspond to whether the documents are
+// in randomly generated circular, spherical, box, and box-polygon shapes (and exact),
+// queried randomly from a set of query types. Each point is randomly either and object
+// or array, and all points and document data fields are nested randomly in arrays (or not).
+//
+// We approximate the user here as a random function :-)
+//
+// These random point fields can then be tested against all types of geo queries using these random shapes.
+//
+// Tests can be easily reproduced by getting the test number from the output directly before a
+// test fails, and hard-wiring that as the test number.
+//
+
+
+var randEnvironment = function(){
+
+ // Normal earth environment
+ if( Random.rand() < 0.5 ){
+ return { max : 180,
+ min : -180,
+ bits : Math.floor( Random.rand() * 32 ) + 1,
+ earth : true,
+ bucketSize : 360 / ( 4 * 1024 * 1024 * 1024 ) }
+ }
+
+ var scales = [ 0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000, 100000 ]
+ var scale = scales[ Math.floor( Random.rand() * scales.length ) ]
+ var offset = Random.rand() * scale
+
+ var max = Random.rand() * scale + offset
+ var min = - Random.rand() * scale + offset
+ var bits = Math.floor( Random.rand() * 32 ) + 1
+ var range = max - min
+ var bucketSize = range / ( 4 * 1024 * 1024 * 1024 )
+
+ return { max : max,
+ min : min,
+ bits : bits,
+ earth : false,
+ bucketSize : bucketSize }
+
+}
+
+var randPoint = function( env, query ) {
+
+ if( query && Random.rand() > 0.5 )
+ return query.exact
+
+ if( env.earth )
+ return [ Random.rand() * 360 - 180, Random.rand() * 180 - 90 ]
+
+ var range = env.max - env.min
+ return [ Random.rand() * range + env.min, Random.rand() * range + env.min ];
+}
+
+var randLocType = function( loc, wrapIn ){
+ return randLocTypes( [ loc ], wrapIn )[0]
+}
+
+var randLocTypes = function( locs, wrapIn ) {
+
+ var rLocs = []
+
+ for( var i = 0; i < locs.length; i++ ){
+ if( Random.rand() < 0.5 )
+ rLocs.push( { x : locs[i][0], y : locs[i][1] } )
+ else
+ rLocs.push( locs[i] )
+ }
+
+ if( wrapIn ){
+ var wrappedLocs = []
+ for( var i = 0; i < rLocs.length; i++ ){
+ var wrapper = {}
+ wrapper[wrapIn] = rLocs[i]
+ wrappedLocs.push( wrapper )
+ }
+
+ return wrappedLocs
+ }
+
+ return rLocs
+
+}
+
+var randDataType = function() {
+
+ var scales = [ 1, 10, 100, 1000, 10000 ]
+ var docScale = scales[ Math.floor( Random.rand() * scales.length ) ]
+ var locScale = scales[ Math.floor( Random.rand() * scales.length ) ]
+
+ var numDocs = 40000
+ var maxLocs = 40000
+ // Make sure we don't blow past our test resources
+ while( numDocs * maxLocs > 40000 ){
+ numDocs = Math.floor( Random.rand() * docScale ) + 1
+ maxLocs = Math.floor( Random.rand() * locScale ) + 1
+ }
+
+ return { numDocs : numDocs,
+ maxLocs : maxLocs }
+
+}
+
+var randQuery = function( env ) {
+
+ var center = randPoint( env )
+
+ var sphereRadius = -1
+ var sphereCenter = null
+ if( env.earth ){
+ // Get a start point that doesn't require wrapping
+ // TODO: Are we a bit too aggressive with wrapping issues?
+ sphereRadius = Random.rand() * 45 * Math.PI / 180
+ sphereCenter = randPoint( env )
+ var i
+ for( i = 0; i < 5; i++ ){
+ var t = db.testSphere; t.drop(); t.ensureIndex({ loc : "2d" }, env )
+ try{ t.find({ loc : { $within : { $centerSphere : [ sphereCenter, sphereRadius ] } } } ).count(); var err; if( err = db.getLastError() ) throw err; }
+ catch(e) { print( e ); continue }
+ print( " Radius " + sphereRadius + " and center " + sphereCenter + " ok ! ")
+ break;
+ }
+ if( i == 5 ) sphereRadius = -1;
+
+ }
+
+ var box = [ randPoint( env ), randPoint( env ) ]
+
+ var boxPoly = [[ box[0][0], box[0][1] ],
+ [ box[0][0], box[1][1] ],
+ [ box[1][0], box[1][1] ],
+ [ box[1][0], box[0][1] ] ]
+
+ if( box[0][0] > box[1][0] ){
+ var swap = box[0][0]
+ box[0][0] = box[1][0]
+ box[1][0] = swap
+ }
+
+ if( box[0][1] > box[1][1] ){
+ var swap = box[0][1]
+ box[0][1] = box[1][1]
+ box[1][1] = swap
+ }
+
+ return { center : center,
+ radius : box[1][0] - box[0][0],
+ exact : randPoint( env ),
+ sphereCenter : sphereCenter,
+ sphereRadius : sphereRadius,
+ box : box,
+ boxPoly : boxPoly }
+
+}
+
+
+var resultTypes = {
+"exact" : function( loc ){
+ return query.exact[0] == loc[0] && query.exact[1] == loc[1]
+},
+"center" : function( loc ){
+ return Geo.distance( query.center, loc ) <= query.radius
+},
+"box" : function( loc ){
+ return loc[0] >= query.box[0][0] && loc[0] <= query.box[1][0] &&
+ loc[1] >= query.box[0][1] && loc[1] <= query.box[1][1]
+
+},
+"sphere" : function( loc ){
+ return ( query.sphereRadius >= 0 ? ( Geo.sphereDistance( query.sphereCenter, loc ) <= query.sphereRadius ) : false )
+},
+"poly" : function( loc ){
+ return loc[0] >= query.box[0][0] && loc[0] <= query.box[1][0] &&
+ loc[1] >= query.box[0][1] && loc[1] <= query.box[1][1]
+}}
+
+var queryResults = function( locs, query, results ){
+
+ if( ! results["center"] ){
+ for( var type in resultTypes ){
+ results[type] = {
+ docsIn : 0,
+ docsOut : 0,
+ locsIn : 0,
+ locsOut : 0
+ }
+ }
+ }
+
+ var indResults = {}
+ for( var type in resultTypes ){
+ indResults[type] = {
+ docIn : false,
+ locsIn : 0,
+ locsOut : 0
+ }
+ }
+
+ for( var type in resultTypes ){
+
+ var docIn = false
+ for( var i = 0; i < locs.length; i++ ){
+ if( resultTypes[type]( locs[i] ) ){
+ results[type].locsIn++
+ indResults[type].locsIn++
+ indResults[type].docIn = true
+ }
+ else{
+ results[type].locsOut++
+ indResults[type].locsOut++
+ }
+ }
+ if( indResults[type].docIn ) results[type].docsIn++
+ else results[type].docsOut++
+
+ }
+
+ return indResults
+
+}
+
+var randQueryAdditions = function( doc, indResults ){
+
+ for( var type in resultTypes ){
+ var choice = Random.rand()
+ if( Random.rand() < 0.25 )
+ doc[type] = ( indResults[type].docIn ? { docIn : "yes" } : { docIn : "no" } )
+ else if( Random.rand() < 0.5 )
+ doc[type] = ( indResults[type].docIn ? { docIn : [ "yes" ] } : { docIn : [ "no" ] } )
+ else if( Random.rand() < 0.75 )
+ doc[type] = ( indResults[type].docIn ? [ { docIn : "yes" } ] : [ { docIn : "no" } ] )
+ else
+ doc[type] = ( indResults[type].docIn ? [ { docIn : [ "yes" ] } ] : [ { docIn : [ "no" ] } ] )
+ }
+
+}
+
+var randIndexAdditions = function( indexDoc ){
+
+ for( var type in resultTypes ){
+
+ if( Random.rand() < 0.5 ) continue;
+
+ var choice = Random.rand()
+ if( Random.rand() < 0.5 )
+ indexDoc[type] = 1
+ else
+ indexDoc[type + ".docIn"] = 1
+
+ }
+
+}
+
+var randYesQuery = function(){
+
+ var choice = Math.floor( Random.rand() * 7 )
+ if( choice == 0 )
+ return { $ne : "no" }
+ else if( choice == 1 )
+ return "yes"
+ else if( choice == 2 )
+ return /^yes/
+ else if( choice == 3 )
+ return { $in : [ "good", "yes", "ok" ] }
+ else if( choice == 4 )
+ return { $exists : true }
+ else if( choice == 5 )
+ return { $nin : [ "bad", "no", "not ok" ] }
+ else if( choice == 6 )
+ return { $not : /^no/ }
+}
+
+var locArray = function( loc ){
+ if( loc.x ) return [ loc.x, loc.y ]
+ if( ! loc.length ) return [ loc[0], loc[1] ]
+ return loc
+}
+
+var locsArray = function( locs ){
+ if( locs.loc ){
+ arr = []
+ for( var i = 0; i < locs.loc.length; i++ ) arr.push( locArray( locs.loc[i] ) )
+ return arr
+ }
+ else{
+ arr = []
+ for( var i = 0; i < locs.length; i++ ) arr.push( locArray( locs[i].loc ) )
+ return arr
+ }
+}
+
+var minBoxSize = function( env, box ){
+ return env.bucketSize * Math.pow( 2, minBucketScale( env, box ) )
+}
+
+var minBucketScale = function( env, box ){
+
+ if( box.length && box[0].length )
+ box = [ box[0][0] - box[1][0], box[0][1] - box[1][1] ]
+
+ if( box.length )
+ box = Math.max( box[0], box[1] )
+
+ print( box )
+ print( env.bucketSize )
+
+ return Math.ceil( Math.log( box / env.bucketSize ) / Math.log( 2 ) )
+
+}
+
+// TODO: Add spherical $uniqueDocs tests
+var numTests = 100
+
+// Our seed will change every time this is run, but
+// each individual test will be reproducible given
+// that seed and test number
+var seed = new Date().getTime()
+
+for ( var test = 0; test < numTests; test++ ) {
+
+ Random.srand( seed + test );
+ //Random.srand( 42240 )
+ //Random.srand( 7344 )
+ var t = db.testAllGeo
+ t.drop()
+
+ print( "Generating test environment #" + test )
+ var env = randEnvironment()
+ //env.bits = 11
+ var query = randQuery( env )
+ var data = randDataType()
+ //data.numDocs = 100; data.maxLocs = 3;
+ var results = {}
+ var totalPoints = 0
+ print( "Calculating target results for " + data.numDocs + " docs with max " + data.maxLocs + " locs " )
+
+ // Index after a random number of docs added
+ var indexIt = Math.floor( Random.rand() * data.numDocs )
+
+ for ( var i = 0; i < data.numDocs; i++ ) {
+
+ if( indexIt == i ){
+ var indexDoc = { "locs.loc" : "2d" }
+ randIndexAdditions( indexDoc )
+
+ // printjson( indexDoc )
+
+ t.ensureIndex( indexDoc, env )
+ assert.isnull( db.getLastError() )
+ }
+
+ var numLocs = Math.floor( Random.rand() * data.maxLocs + 1 )
+ totalPoints += numLocs
+
+ var multiPoint = []
+ for ( var p = 0; p < numLocs; p++ ) {
+ var point = randPoint( env, query )
+ multiPoint.push( point )
+ }
+
+ var indResults = queryResults( multiPoint, query, results )
+
+ var doc
+ // Nest the keys differently
+ if( Random.rand() < 0.5 )
+ doc = { locs : { loc : randLocTypes( multiPoint ) } }
+ else
+ doc = { locs : randLocTypes( multiPoint, "loc" ) }
+
+ randQueryAdditions( doc, indResults )
+
+ //printjson( doc )
+ doc._id = i
+ t.insert( doc )
+
+ }
+
+ printjson( { seed : seed,
+ test: test,
+ env : env,
+ query : query,
+ data : data,
+ results : results } )
+
+
+ // exact
+ print( "Exact query..." )
+ assert.eq( results.exact.docsIn, t.find( { "locs.loc" : randLocType( query.exact ), "exact.docIn" : randYesQuery() } ).count() )
+
+ // $center
+ print( "Center query..." )
+ print( "Min box : " + minBoxSize( env, query.radius ) )
+ assert.eq( results.center.docsIn, t.find( { "locs.loc" : { $within : { $center : [ query.center, query.radius ], $uniqueDocs : 1 } }, "center.docIn" : randYesQuery() } ).count() )
+ assert.eq( results.center.locsIn, t.find( { "locs.loc" : { $within : { $center : [ query.center, query.radius ], $uniqueDocs : false } }, "center.docIn" : randYesQuery() } ).count() )
+ if( query.sphereRadius >= 0 ){
+ print( "Center sphere query...")
+ // $centerSphere
+ assert.eq( results.sphere.docsIn, t.find( { "locs.loc" : { $within : { $centerSphere : [ query.sphereCenter, query.sphereRadius ] } }, "sphere.docIn" : randYesQuery() } ).count() )
+ assert.eq( results.sphere.locsIn, t.find( { "locs.loc" : { $within : { $centerSphere : [ query.sphereCenter, query.sphereRadius ], $uniqueDocs : 0.0 } }, "sphere.docIn" : randYesQuery() } ).count() )
+ }
+
+ // $box
+ print( "Box query..." )
+ assert.eq( results.box.docsIn, t.find( { "locs.loc" : { $within : { $box : query.box, $uniqueDocs : true } }, "box.docIn" : randYesQuery() } ).count() )
+ assert.eq( results.box.locsIn, t.find( { "locs.loc" : { $within : { $box : query.box, $uniqueDocs : false } }, "box.docIn" : randYesQuery() } ).count() )
+
+ // $polygon
+ print( "Polygon query..." )
+ assert.eq( results.poly.docsIn, t.find( { "locs.loc" : { $within : { $polygon : query.boxPoly } }, "poly.docIn" : randYesQuery() } ).count() )
+ assert.eq( results.poly.locsIn, t.find( { "locs.loc" : { $within : { $polygon : query.boxPoly, $uniqueDocs : 0 } }, "poly.docIn" : randYesQuery() } ).count() )
+
+ // $near
+ print( "Near query..." )
+ assert.eq( results.center.locsIn > 100 ? 100 : results.center.locsIn, t.find( { "locs.loc" : { $near : query.center, $maxDistance : query.radius } } ).count( true ) )
+
+ if( query.sphereRadius >= 0 ){
+ print( "Near sphere query...")
+ // $centerSphere
+ assert.eq( results.sphere.locsIn > 100 ? 100 : results.sphere.locsIn, t.find( { "locs.loc" : { $nearSphere : query.sphereCenter, $maxDistance : query.sphereRadius } } ).count( true ) )
+ }
+
+
+ // geoNear
+ // results limited by size of objects
+ if( data.maxLocs < 100 ){
+
+ // GeoNear query
+ print( "GeoNear query..." )
+ assert.eq( results.center.locsIn > 100 ? 100 : results.center.locsIn, t.getDB().runCommand({ geoNear : "testAllGeo", near : query.center, maxDistance : query.radius }).results.length )
+ // GeoNear query
+ assert.eq( results.center.docsIn > 100 ? 100 : results.center.docsIn, t.getDB().runCommand({ geoNear : "testAllGeo", near : query.center, maxDistance : query.radius, uniqueDocs : true }).results.length )
+
+
+ var num = 2 * results.center.locsIn;
+ if( num > 200 ) num = 200;
+
+ var output = db.runCommand( {
+ geoNear : "testAllGeo",
+ near : query.center,
+ maxDistance : query.radius ,
+ includeLocs : true,
+ num : num } ).results
+
+ assert.eq( Math.min( 200, results.center.locsIn ), output.length )
+
+ var distance = 0;
+ for ( var i = 0; i < output.length; i++ ) {
+ var retDistance = output[i].dis
+ var retLoc = locArray( output[i].loc )
+
+ // print( "Dist from : " + results[i].loc + " to " + startPoint + " is "
+ // + retDistance + " vs " + radius )
+
+ var arrLocs = locsArray( output[i].obj.locs )
+
+ assert.contains( retLoc, arrLocs )
+
+ // printjson( arrLocs )
+
+ var distInObj = false
+ for ( var j = 0; j < arrLocs.length && distInObj == false; j++ ) {
+ var newDistance = Geo.distance( locArray( query.center ) , arrLocs[j] )
+ distInObj = ( newDistance >= retDistance - 0.0001 && newDistance <= retDistance + 0.0001 )
+ }
+
+ assert( distInObj )
+ assert.between( retDistance - 0.0001 , Geo.distance( locArray( query.center ), retLoc ), retDistance + 0.0001 )
+ assert.lte( retDistance, query.radius )
+ assert.gte( retDistance, distance )
+ distance = retDistance
+ }
+
+ }
+
+ //break;
+
+
+}
+
+
diff --git a/jstests/slowWeekly/geo_mnypts_plus_fields.js b/jstests/slowWeekly/geo_mnypts_plus_fields.js
new file mode 100644
index 0000000..f67e49b
--- /dev/null
+++ b/jstests/slowWeekly/geo_mnypts_plus_fields.js
@@ -0,0 +1,98 @@
+// Test sanity of geo queries with a lot of points
+
+var maxFields = 2;
+
+for( var fields = 1; fields < maxFields; fields++ ){
+
+ var coll = db.testMnyPts
+ coll.drop()
+
+ var totalPts = 500 * 1000
+
+ // Add points in a 100x100 grid
+ for( var i = 0; i < totalPts; i++ ){
+ var ii = i % 10000
+
+ var doc = { loc : [ ii % 100, Math.floor( ii / 100 ) ] }
+
+ // Add fields with different kinds of data
+ for( var j = 0; j < fields; j++ ){
+
+ var field = null
+
+ if( j % 3 == 0 ){
+ // Make half the points not searchable
+ field = "abcdefg" + ( i % 2 == 0 ? "h" : "" )
+ }
+ else if( j % 3 == 1 ){
+ field = new Date()
+ }
+ else{
+ field = true
+ }
+
+ doc[ "field" + j ] = field
+ }
+
+ coll.insert( doc )
+ }
+
+ // Create the query for the additional fields
+ queryFields = {}
+ for( var j = 0; j < fields; j++ ){
+
+ var field = null
+
+ if( j % 3 == 0 ){
+ field = "abcdefg"
+ }
+ else if( j % 3 == 1 ){
+ field = { $lte : new Date() }
+ }
+ else{
+ field = true
+ }
+
+ queryFields[ "field" + j ] = field
+ }
+
+ coll.ensureIndex({ loc : "2d" })
+
+    // Check that a quarter of the points fall in each quadrant
+ for( var i = 0; i < 4; i++ ){
+ var x = i % 2
+ var y = Math.floor( i / 2 )
+
+ var box = [[0, 0], [49, 49]]
+ box[0][0] += ( x == 1 ? 50 : 0 )
+ box[1][0] += ( x == 1 ? 50 : 0 )
+ box[0][1] += ( y == 1 ? 50 : 0 )
+ box[1][1] += ( y == 1 ? 50 : 0 )
+
+ // Now only half of each result comes back
+ assert.eq( totalPts / ( 4 * 2 ), coll.find(Object.extend( { loc : { $within : { $box : box } } }, queryFields ) ).count() )
+ assert.eq( totalPts / ( 4 * 2 ), coll.find(Object.extend( { loc : { $within : { $box : box } } }, queryFields ) ).itcount() )
+
+ }
+
+    // Check that half of the points fall in each half
+ for( var i = 0; i < 2; i++ ){
+
+ var box = [[0, 0], [49, 99]]
+ box[0][0] += ( i == 1 ? 50 : 0 )
+ box[1][0] += ( i == 1 ? 50 : 0 )
+
+ assert.eq( totalPts / ( 2 * 2 ), coll.find(Object.extend( { loc : { $within : { $box : box } } }, queryFields ) ).count() )
+ assert.eq( totalPts / ( 2 * 2 ), coll.find(Object.extend( { loc : { $within : { $box : box } } }, queryFields ) ).itcount() )
+
+ }
+
+ // Check that all but corner set of points in radius
+ var circle = [[0, 0], (100 - 1) * Math.sqrt( 2 ) - 0.25 ]
+
+ // All [99,x] pts are field0 : "abcdefg"
+ assert.eq( totalPts / 2 - totalPts / ( 100 * 100 ), coll.find(Object.extend( { loc : { $within : { $center : circle } } }, queryFields ) ).count() )
+ assert.eq( totalPts / 2 - totalPts / ( 100 * 100 ), coll.find(Object.extend( { loc : { $within : { $center : circle } } }, queryFields ) ).itcount() )
+
+}
+
diff --git a/jstests/slowWeekly/query_yield2.js b/jstests/slowWeekly/query_yield2.js
index dd7e5d9..6d06357 100644
--- a/jstests/slowWeekly/query_yield2.js
+++ b/jstests/slowWeekly/query_yield2.js
@@ -59,7 +59,7 @@ while ( ( (new Date()).getTime() - start ) < ( time * 2 ) ){
assert.eq( 1 , x.inprog.length , "nothing in prog" );
}
- assert.gt( 100 , me );
+ assert.gt( 200 , me );
if ( x.inprog.length == 0 )
break;
diff --git a/jstests/slowWeekly/repair2.js b/jstests/slowWeekly/repair2.js
new file mode 100644
index 0000000..3097d81
--- /dev/null
+++ b/jstests/slowWeekly/repair2.js
@@ -0,0 +1,29 @@
+// SERVER-2843 The repair command should not yield.
+
+baseName = "jstests_repair2";
+
+t = db.getSisterDB( baseName )[ baseName ];
+t.drop();
+
+function protect( f ) {
+ try {
+ f();
+ } catch( e ) {
+ printjson( e );
+ }
+}
+
+s = startParallelShell( "db = db.getSisterDB( '" + baseName + "'); for( i = 0; i < 10; ++i ) { db.repairDatabase(); sleep( 5000 ); }" );
+
+for( i = 0; i < 30; ++i ) {
+
+ for( j = 0; j < 5000; ++j ) {
+ protect( function() { t.insert( {_id:j} ); } );
+ }
+
+ for( j = 0; j < 5000; ++j ) {
+ protect( function() { t.remove( {_id:j} ); } );
+ }
+
+ assert.eq( 0, t.count() );
+} \ No newline at end of file
diff --git a/jstests/slowWeekly/update_yield1.js b/jstests/slowWeekly/update_yield1.js
index 7e95855..5f71830 100644
--- a/jstests/slowWeekly/update_yield1.js
+++ b/jstests/slowWeekly/update_yield1.js
@@ -54,7 +54,7 @@ while ( ( (new Date()).getTime() - start ) < ( time * 2 ) ){
assert.eq( 1 , x.inprog.length , "nothing in prog" );
}
- assert.gt( 2000 , me );
+ assert.gt( time / 3 , me );
}
join();
diff --git a/jstests/sort10.js b/jstests/sort10.js
new file mode 100644
index 0000000..74a2a2d
--- /dev/null
+++ b/jstests/sort10.js
@@ -0,0 +1,48 @@
+// signed dates check
+t = db.sort2;
+
+function checkSorting1(opts) {
+ t.drop();
+ t.insert({ x: new Date(50000) });
+ t.insert({ x: new Date(-50) });
+ var d = new Date(-50);
+ for (var pass = 0; pass < 2; pass++) {
+ assert(t.find().sort({x:1})[0].x.valueOf() == d.valueOf());
+ t.ensureIndex({ x: 1 }, opts);
+ t.insert({ x: new Date() });
+ }
+}
+
+checkSorting1({})
+checkSorting1({"background":true})
+
+
+
+function checkSorting2(dates, sortOrder) {
+ cur = t.find().sort({x:sortOrder});
+ assert.eq(dates.length, cur.count(), "Incorrect number of results returned");
+ index = 0;
+ while (cur.hasNext()) {
+ date = cur.next().x;
+ assert.eq(dates[index].valueOf(), date.valueOf());
+ index++;
+ }
+}
+
+t.drop();
+dates = [new Date(-5000000000000), new Date(5000000000000), new Date(0), new Date(5), new Date(-5)];
+for (var i = 0; i < dates.length; i++) {
+ t.insert({x:dates[i]});
+}
+dates.sort(function(a,b){return a - b});
+reverseDates = dates.slice(0).reverse()
+
+checkSorting2(dates, 1)
+checkSorting2(reverseDates, -1)
+t.ensureIndex({x:1})
+checkSorting2(dates, 1)
+checkSorting2(reverseDates, -1)
+t.dropIndexes()
+t.ensureIndex({x:-1})
+checkSorting2(dates, 1)
+checkSorting2(reverseDates, -1)
diff --git a/jstests/sort2.js b/jstests/sort2.js
index 1e21414..6dfa848 100644
--- a/jstests/sort2.js
+++ b/jstests/sort2.js
@@ -1,22 +1,32 @@
// test sorting, mainly a test ver simple with no index
t = db.sort2;
-t.drop();
+t.drop();
t.save({x:1, y:{a:5,b:4}});
t.save({x:1, y:{a:7,b:3}});
t.save({x:1, y:{a:2,b:3}});
t.save({x:1, y:{a:9,b:3}});
-
for( var pass = 0; pass < 2; pass++ ) {
-
var res = t.find().sort({'y.a':1}).toArray();
assert( res[0].y.a == 2 );
assert( res[1].y.a == 5 );
assert( res.length == 4 );
-
t.ensureIndex({"y.a":1});
-
}
-
assert(t.validate().valid);
+
+t.drop();
+t.insert({ x: 1 })
+t.insert({ x: 5000000000 })
+t.insert({ x: NaN });
+t.insert({ x: Infinity });
+t.insert({ x: -Infinity });
+var good = [NaN, -Infinity, 1, 5000000000, Infinity];
+for (var pass = 0; pass < 2; pass++) {
+ var res = t.find({}, { _id: 0 }).sort({ x: 1 }).toArray();
+ for (var i = 0; i < good.length; i++) {
+ assert(good[i].toString() == res[i].x.toString());
+ }
+ t.ensureIndex({ x : 1 });
+}
diff --git a/jstests/sort7.js b/jstests/sort7.js
new file mode 100644
index 0000000..d73f13a
--- /dev/null
+++ b/jstests/sort7.js
@@ -0,0 +1,25 @@
+// Check sorting of array sub field SERVER-480.
+
+t = db.jstests_sort7;
+t.drop();
+
+// Compare indexed and unindexed sort order for an array embedded field.
+
+t.save( { a : [ { x : 2 } ] } );
+t.save( { a : [ { x : 1 } ] } );
+t.save( { a : [ { x : 3 } ] } );
+unindexed = t.find().sort( {"a.x":1} ).toArray();
+t.ensureIndex( { "a.x" : 1 } );
+indexed = t.find().sort( {"a.x":1} ).hint( {"a.x":1} ).toArray();
+assert.eq( unindexed, indexed );
+
+// Now check when there are two objects in the array.
+
+t.remove();
+t.save( { a : [ { x : 2 }, { x : 3 } ] } );
+t.save( { a : [ { x : 1 }, { x : 4 } ] } );
+t.save( { a : [ { x : 3 }, { x : 2 } ] } );
+unindexed = t.find().sort( {"a.x":1} ).toArray();
+t.ensureIndex( { "a.x" : 1 } );
+indexed = t.find().sort( {"a.x":1} ).hint( {"a.x":1} ).toArray();
+assert.eq( unindexed, indexed );
diff --git a/jstests/sort8.js b/jstests/sort8.js
new file mode 100644
index 0000000..169195b
--- /dev/null
+++ b/jstests/sort8.js
@@ -0,0 +1,30 @@
+// Check sorting of arrays indexed by key SERVER-2884
+
+t = db.jstests_sort8;
+t.drop();
+
+t.save( {a:[1,10]} );
+t.save( {a:5} );
+unindexedForward = t.find().sort( {a:1} ).toArray();
+unindexedReverse = t.find().sort( {a:-1} ).toArray();
+t.ensureIndex( {a:1} );
+indexedForward = t.find().sort( {a:1} ).hint( {a:1} ).toArray();
+indexedReverse = t.find().sort( {a:1} ).hint( {a:1} ).toArray();
+
+assert.eq( unindexedForward, indexedForward );
+assert.eq( unindexedReverse, indexedReverse );
+
+// Sorting is based on array members, not the array itself.
+assert.eq( [1,10], unindexedForward[ 0 ].a );
+assert.eq( [1,10], unindexedReverse[ 0 ].a );
+
+// Now try with a bounds constraint.
+t.dropIndexes();
+unindexedForward = t.find({a:{$gte:5}}).sort( {a:1} ).toArray();
+unindexedReverse = t.find({a:{$lte:5}}).sort( {a:-1} ).toArray();
+t.ensureIndex( {a:1} );
+indexedForward = t.find({a:{$gte:5}}).sort( {a:1} ).hint( {a:1} ).toArray();
+indexedReverse = t.find({a:{$lte:5}}).sort( {a:-1} ).hint( {a:1} ).toArray();
+
+assert.eq( unindexedForward, indexedForward );
+assert.eq( unindexedReverse, indexedReverse );
diff --git a/jstests/sort9.js b/jstests/sort9.js
new file mode 100644
index 0000000..62407d6
--- /dev/null
+++ b/jstests/sort9.js
@@ -0,0 +1,26 @@
+// Unindexed array sorting SERVER-2884
+
+t = db.jstests_sort9;
+t.drop();
+
+t.save( {a:[]} );
+t.save( {a:[[]]} );
+assert.eq( 2, t.find( {a:{$ne:4}} ).sort( {a:1} ).itcount() );
+assert.eq( 2, t.find( {'a.b':{$ne:4}} ).sort( {'a.b':1} ).itcount() );
+assert.eq( 2, t.find( {a:{$ne:4}} ).sort( {'a.b':1} ).itcount() );
+
+t.drop();
+t.save( {} );
+assert.eq( 1, t.find( {a:{$ne:4}} ).sort( {a:1} ).itcount() );
+assert.eq( 1, t.find( {'a.b':{$ne:4}} ).sort( {'a.b':1} ).itcount() );
+assert.eq( 1, t.find( {a:{$ne:4}} ).sort( {'a.b':1} ).itcount() );
+assert.eq( 1, t.find( {a:{$exists:0}} ).sort( {a:1} ).itcount() );
+assert.eq( 1, t.find( {a:{$exists:0}} ).sort( {'a.b':1} ).itcount() );
+
+t.drop();
+t.save( {a:{}} );
+assert.eq( 1, t.find( {a:{$ne:4}} ).sort( {a:1} ).itcount() );
+assert.eq( 1, t.find( {'a.b':{$ne:4}} ).sort( {'a.b':1} ).itcount() );
+assert.eq( 1, t.find( {a:{$ne:4}} ).sort( {'a.b':1} ).itcount() );
+assert.eq( 1, t.find( {'a.b':{$exists:0}} ).sort( {a:1} ).itcount() );
+assert.eq( 1, t.find( {'a.b':{$exists:0}} ).sort( {'a.b':1} ).itcount() );
diff --git a/jstests/sorta.js b/jstests/sorta.js
new file mode 100644
index 0000000..7c82778
--- /dev/null
+++ b/jstests/sorta.js
@@ -0,0 +1,26 @@
+// SERVER-2905 sorting with missing fields
+
+t = db.jstests_sorta;
+t.drop();
+
+// Enable _allow_dot to try and bypass v8 field name checking.
+t.insert( {_id:0,a:MinKey}, true );
+t.save( {_id:3,a:null} );
+t.save( {_id:1,a:[]} );
+t.save( {_id:7,a:[2]} );
+t.save( {_id:4} );
+t.save( {_id:5,a:null} );
+t.save( {_id:2,a:[]} );
+t.save( {_id:6,a:1} );
+t.insert( {_id:8,a:MaxKey}, true );
+
+function sorted( arr ) {
+ assert.eq( 9, arr.length );
+ for( i = 1; i < arr.length; ++i ) {
+ assert.lte( arr[ i-1 ]._id, arr[ i ]._id );
+ }
+}
+
+sorted( t.find().sort( {a:1} ).toArray() );
+t.ensureIndex( {a:1} );
+sorted( t.find().sort( {a:1} ).hint( {a:1} ).toArray() );
diff --git a/jstests/tool/csv1.js b/jstests/tool/csv1.js
index ccf1d09..5eb7ab0 100644
--- a/jstests/tool/csv1.js
+++ b/jstests/tool/csv1.js
@@ -4,25 +4,25 @@ t = new ToolTest( "csv1" )
c = t.startDB( "foo" );
-base = { a : 1 , b : "foo,bar\"baz,qux" , c: 5, 'd d': -6 , e: '-'};
+base = { a : 1 , b : "foo,bar\"baz,qux" , c: 5, 'd d': -6 , e: '-', f : "."};
assert.eq( 0 , c.count() , "setup1" );
c.insert( base );
delete base._id
assert.eq( 1 , c.count() , "setup2" );
-t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--csv" , "-f" , "a,b,c,d d,e" )
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--csv" , "-f" , "a,b,c,d d,e,f" )
c.drop()
assert.eq( 0 , c.count() , "after drop" )
-t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--type" , "csv" , "-f" , "a,b,c,d d,e" );
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--type" , "csv" , "-f" , "a,b,c,d d,e,f" );
assert.soon( "2 == c.count()" , "restore 2" );
a = c.find().sort( { a : 1 } ).toArray();
delete a[0]._id
delete a[1]._id
-assert.eq( tojson( { a : "a" , b : "b" , c : "c" , 'd d': "d d", e: 'e'} ) , tojson( a[1] ) , "csv parse 1" );
+assert.eq( tojson( { a : "a" , b : "b" , c : "c" , 'd d': "d d", e: 'e', f : "f"} ) , tojson( a[1] ) , "csv parse 1" );
assert.eq( tojson( base ) , tojson(a[0]) , "csv parse 0" )
c.drop()
diff --git a/jstests/tool/csvexport1.js b/jstests/tool/csvexport1.js
new file mode 100644
index 0000000..eb4e6e3
--- /dev/null
+++ b/jstests/tool/csvexport1.js
@@ -0,0 +1,45 @@
+// csvexport1.js
+
+t = new ToolTest( "csvexport1" )
+
+c = t.startDB( "foo" );
+
+assert.eq( 0 , c.count() , "setup1" );
+
+objId = ObjectId()
+
+c.insert({ a : new NumberInt(1) , b : objId , c: [1, 2, 3], d : {a : "hello", b : "world"} , e: '-'})
+c.insert({ a : -2.0, c : MinKey, d : "Then he said, \"Hello World!\"", e : new NumberLong(3)})
+c.insert({ a : new BinData(0, "1234"), b : ISODate("2009-08-27"), c : new Timestamp(1234, 9876), d : /foo*\"bar\"/i, e : function foo() { print("Hello World!"); }})
+
+assert.eq( 3 , c.count() , "setup2" );
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--csv", "-f", "a,b,c,d,e")
+
+
+c.drop()
+
+assert.eq( 0 , c.count() , "after drop" )
+
+t.runTool("import", "--file", t.extFile, "-d", t.baseName, "-c", "foo", "--type", "csv", "--headerline");
+
+assert.soon ( 3 + " == c.count()", "after import");
+
+// Note: Exporting and Importing to/from CSV is not designed to be round-trippable
+expected = []
+expected.push({ a : 1, b : "ObjectID(" + objId.toString() + ")", c : "[ 1, 2, 3 ]", d : "{ \"a\" : \"hello\", \"b\" : \"world\" }", e : "-"})
+expected.push({ a : -2.0, b : "", c : "$MinKey", d : "Then he said, \"Hello World!\"", e : 3})
+expected.push({ a : "D76DF8", b : "2009-08-27T00:00:00Z", c : "{ \"t\" : 1000 , \"i\" : 9876 }", d : "/foo*\\\"bar\\\"/i", e : tojson(function foo() { print("Hello World!"); })})
+
+actual = []
+actual.push(c.find({a : 1}).toArray()[0]);
+actual.push(c.find({a : -2.0}).toArray()[0]);
+actual.push(c.find({a : "D76DF8"}).toArray()[0]);
+
+for (i = 0; i < expected.length; i++) {
+ delete actual[i]._id
+ assert.eq( expected[i], actual[i], "CSV export " + i);
+}
+
+
+t.stop() \ No newline at end of file
diff --git a/jstests/tool/csvexport2.js b/jstests/tool/csvexport2.js
new file mode 100644
index 0000000..3e0dd2c
--- /dev/null
+++ b/jstests/tool/csvexport2.js
@@ -0,0 +1,31 @@
+// csvexport2.js
+
+t = new ToolTest( "csvexport2" )
+
+c = t.startDB( "foo" );
+
+// This test is designed to test exporting of a CodeWithScope object.
+// However, due to SERVER-3391, it is not possible to create a CodeWithScope object in the mongo shell,
+// therefore this test does not work. Once SERVER-3391 is resolved, this test should be uncommented
+
+//assert.eq( 0 , c.count() , "setup1" );
+
+//c.insert({ a : 1 , b : Code("print(\"Hello \" + x);", {"x" : "World!"})})
+//assert.eq( 1 , c.count() , "setup2" );
+//t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--csv", "-f", "a,b")
+
+
+//c.drop()
+
+//assert.eq( 0 , c.count() , "after drop" )
+//t.runTool("import", "--file", t.extFile, "-d", t.baseName, "-c", "foo", "--type", "csv", "--headerline");
+//assert.soon ( 1 + " == c.count()", "after import");
+
+//expected = { a : 1, b : "\"{ \"$code\" : print(\"Hello \" + x); , \"$scope\" : { \"x\" : \"World!\" } }"};
+//actual = c.findOne()
+
+//delete actual._id;
+//assert.eq( expected, actual );
+
+
+t.stop() \ No newline at end of file
diff --git a/jstests/tool/csvimport1.js b/jstests/tool/csvimport1.js
new file mode 100644
index 0000000..3bff111
--- /dev/null
+++ b/jstests/tool/csvimport1.js
@@ -0,0 +1,40 @@
+// csvimport1.js
+
+t = new ToolTest( "csvimport1" )
+
+c = t.startDB( "foo" );
+
+base = []
+base.push({ a : 1, b : "this is some text.\nThis text spans multiple lines, and just for fun\ncontains a comma", "c" : "This has leading and trailing whitespace!" })
+base.push({a : 2, b : "When someone says something you \"put it in quotes\"", "c" : "I like embedded quotes/slashes\\backslashes" })
+base.push({a : 3, b : " This line contains the empty string and has leading and trailing whitespace inside the quotes! ", "c" : "" })
+base.push({a : 4, b : "", "c" : "How are empty entries handled?" })
+base.push({a : 5, b : "\"\"", c : "\"This string is in quotes and contains empty quotes (\"\")\""})
+base.push({ a : "a" , b : "b" , c : "c"})
+
+assert.eq( 0 , c.count() , "setup" );
+
+t.runTool( "import" , "--file" , "jstests/tool/data/csvimport1.csv" , "-d" , t.baseName , "-c" , "foo" , "--type" , "csv" , "-f" , "a,b,c" );
+assert.soon( base.length + " == c.count()" , "after import 1 " );
+
+a = c.find().sort( { a : 1 } ).toArray();
+for (i = 0; i < base.length; i++ ) {
+ delete a[i]._id
+ assert.eq( tojson(base[i]), tojson(a[i]), "csv parse " + i)
+}
+
+c.drop()
+assert.eq( 0 , c.count() , "after drop" )
+
+t.runTool( "import" , "--file" , "jstests/tool/data/csvimport1.csv" , "-d" , t.baseName , "-c" , "foo" , "--type" , "csv" , "--headerline" )
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( base.length - 1 , c.count() , "after import 2" );
+
+x = c.find().sort( { a : 1 } ).toArray();
+for (i = 0; i < base.length - 1; i++ ) {
+ delete x[i]._id
+ assert.eq( tojson(base[i]), tojson(x[i]), "csv parse with headerline " + i)
+}
+
+
+t.stop()
diff --git a/jstests/tool/data/a.tsv b/jstests/tool/data/a.tsv
new file mode 100644
index 0000000..1e09417
--- /dev/null
+++ b/jstests/tool/data/a.tsv
@@ -0,0 +1,2 @@
+a b c d e
+ 1 foobar 5 -6
diff --git a/jstests/tool/data/csvimport1.csv b/jstests/tool/data/csvimport1.csv
new file mode 100644
index 0000000..256d40a
--- /dev/null
+++ b/jstests/tool/data/csvimport1.csv
@@ -0,0 +1,8 @@
+a,b,c
+1,"this is some text.
+This text spans multiple lines, and just for fun
+contains a comma", "This has leading and trailing whitespace!"
+2, "When someone says something you ""put it in quotes""", I like embedded quotes/slashes\backslashes
+ 3 , " This line contains the empty string and has leading and trailing whitespace inside the quotes! ", ""
+ "4" ,, How are empty entries handled?
+"5","""""", """This string is in quotes and contains empty quotes ("""")"""
diff --git a/jstests/tool/data/dumprestore6/foo.bson b/jstests/tool/data/dumprestore6/foo.bson
new file mode 100644
index 0000000..b8f8f99
--- /dev/null
+++ b/jstests/tool/data/dumprestore6/foo.bson
Binary files differ
diff --git a/jstests/tool/data/dumprestore6/system.indexes.bson b/jstests/tool/data/dumprestore6/system.indexes.bson
new file mode 100644
index 0000000..dde25da
--- /dev/null
+++ b/jstests/tool/data/dumprestore6/system.indexes.bson
Binary files differ
diff --git a/jstests/tool/dumprestore5.js b/jstests/tool/dumprestore5.js
new file mode 100644
index 0000000..ce28fea
--- /dev/null
+++ b/jstests/tool/dumprestore5.js
@@ -0,0 +1,36 @@
+// dumprestore5.js
+
+t = new ToolTest( "dumprestore5" );
+
+t.startDB( "foo" );
+
+db = t.db
+
+db.addUser('user','password')
+
+assert.eq(1, db.system.users.count(), "setup")
+assert.eq(1, db.system.indexes.count(), "setup2")
+
+t.runTool( "dump" , "--out" , t.ext );
+
+db.dropDatabase()
+
+assert.eq(0, db.system.users.count(), "didn't drop users")
+assert.eq(0, db.system.indexes.count(), "didn't drop indexes")
+
+t.runTool("restore", "--dir", t.ext)
+
+assert.soon("db.system.users.findOne()", "no data after restore");
+assert.eq(1, db.system.users.find({user:'user'}).count(), "didn't restore users")
+assert.eq(1, db.system.indexes.count(), "didn't restore indexes")
+
+db.removeUser('user')
+db.addUser('user2', 'password2')
+
+t.runTool("restore", "--dir", t.ext, "--drop")
+
+assert.soon("1 == db.system.users.find({user:'user'}).count()", "didn't restore users 2")
+assert.eq(0, db.system.users.find({user:'user2'}).count(), "didn't drop users")
+assert.eq(1, db.system.indexes.count(), "didn't maintain indexes")
+
+t.stop();
diff --git a/jstests/tool/dumprestore6.js b/jstests/tool/dumprestore6.js
new file mode 100644
index 0000000..d8b349e
--- /dev/null
+++ b/jstests/tool/dumprestore6.js
@@ -0,0 +1,27 @@
+// dumprestore6.js
+// Test restoring from a dump with an old index version
+
+t = new ToolTest( "dumprestore6" );
+
+c = t.startDB( "foo" );
+db = t.db
+assert.eq( 0 , c.count() , "setup1" );
+
+t.runTool("restore", "--dir", "jstests/tool/data/dumprestore6", "--db", "jstests_tool_dumprestore6")
+
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( 1 , c.count() , "after restore" );
+assert.eq( 1 , db.system.indexes.findOne({name:'a_1'}).v, "index version wasn't updated")
+assert.eq( 1, c.count({v:0}), "dropped the 'v' field from a non-index collection")
+
+db.dropDatabase()
+assert.eq( 0 , c.count() , "after drop" );
+
+t.runTool("restore", "--dir", "jstests/tool/data/dumprestore6", "--db", "jstests_tool_dumprestore6", "--keepIndexVersion")
+
+assert.soon( "c.findOne()" , "no data after sleep2" );
+assert.eq( 1 , c.count() , "after restore2" );
+assert.eq( 0 , db.system.indexes.findOne({name:'a_1'}).v, "index version wasn't maintained")
+assert.eq( 1, c.count({v:0}), "dropped the 'v' field from a non-index collection")
+
+t.stop();
diff --git a/jstests/tool/exportimport1.js b/jstests/tool/exportimport1.js
index 915adcd..451078e 100644
--- a/jstests/tool/exportimport1.js
+++ b/jstests/tool/exportimport1.js
@@ -4,7 +4,8 @@ t = new ToolTest( "exportimport1" );
c = t.startDB( "foo" );
assert.eq( 0 , c.count() , "setup1" );
-c.save( { a : 22 } );
+var arr = ["x", undefined, "y", undefined];
+c.save( { a : 22 , b : arr} );
assert.eq( 1 , c.count() , "setup2" );
t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
@@ -15,8 +16,11 @@ assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );;
t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
assert.soon( "c.findOne()" , "no data after sleep" );
assert.eq( 1 , c.count() , "after restore 2" );
-assert.eq( 22 , c.findOne().a , "after restore 2" );
-
+var doc = c.findOne();
+assert.eq( 22 , doc.a , "after restore 2" );
+for (var i=0; i<arr.length; i++) {
+ assert.eq( arr[i], doc.b[i] , "after restore array: "+i );
+}
// now with --jsonArray
@@ -30,4 +34,23 @@ assert.soon( "c.findOne()" , "no data after sleep" );
assert.eq( 1 , c.count() , "after restore 2" );
assert.eq( 22 , c.findOne().a , "after restore 2" );
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+
+arr = ["a", undefined, "c"];
+c.save({a : arr});
+assert.eq( 1 , c.count() , "setup2" );
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );;
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( 1 , c.count() , "after restore 2" );
+var doc = c.findOne();
+for (var i=0; i<arr.length; i++) {
+ assert.eq( arr[i], doc.a[i] , "after restore array: "+i );
+}
+
+
t.stop();
diff --git a/jstests/tool/tsv1.js b/jstests/tool/tsv1.js
new file mode 100644
index 0000000..1b0ddbb
--- /dev/null
+++ b/jstests/tool/tsv1.js
@@ -0,0 +1,32 @@
+// tsv1.js
+
+t = new ToolTest( "tsv1" )
+
+c = t.startDB( "foo" );
+
+base = { a : "", b : 1 , c : "foobar" , d: 5, e: -6 };
+
+t.runTool( "import" , "--file" , "jstests/tool/data/a.tsv" , "-d" , t.baseName , "-c" , "foo" , "--type" , "tsv" , "-f" , "a,b,c,d,e" );
+assert.soon( "2 == c.count()" , "restore 2" );
+
+a = c.find().sort( { a : 1 } ).toArray();
+delete a[0]._id
+delete a[1]._id
+
+assert.eq( { a : "a" , b : "b" , c : "c" , d: "d", e: "e"} , a[1] , "tsv parse 1" );
+assert.eq( base , a[0] , "tsv parse 0" )
+
+c.drop()
+assert.eq( 0 , c.count() , "after drop 2" )
+
+t.runTool( "import" , "--file" , "jstests/tool/data/a.tsv" , "-d" , t.baseName , "-c" , "foo" , "--type" , "tsv" , "--headerline" )
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( 1 , c.count() , "after restore 2" );
+
+x = c.findOne()
+delete x._id;
+assert.eq( base , x , "tsv parse 2" )
+
+
+
+t.stop()
diff --git a/jstests/type2.js b/jstests/type2.js
new file mode 100644
index 0000000..820607e
--- /dev/null
+++ b/jstests/type2.js
@@ -0,0 +1,19 @@
+// SERVER-1735 $type:10 matches null value, not missing value.
+
+t = db.jstests_type2;
+t.drop();
+
+t.save( {a:null} );
+t.save( {} );
+t.save( {a:'a'} );
+
+function test() {
+ assert.eq( 2, t.count( {a:null} ) );
+ assert.eq( 1, t.count( {a:{$type:10}} ) );
+ assert.eq( 2, t.count( {a:{$exists:true}} ) );
+ assert.eq( 1, t.count( {a:{$exists:false}} ) );
+}
+
+test();
+t.ensureIndex( {a:1} );
+test(); \ No newline at end of file
diff --git a/jstests/type3.js b/jstests/type3.js
new file mode 100644
index 0000000..b16502b
--- /dev/null
+++ b/jstests/type3.js
@@ -0,0 +1,68 @@
+// Check query type bracketing SERVER-3222
+
+t = db.jstests_type3;
+t.drop();
+
+t.ensureIndex( {a:1} );
+
+// Type Object
+t.save( {a:{'':''}} );
+assert.eq( 1, t.find( {a:{$type:3}} ).hint( {a:1} ).itcount() );
+
+// Type Array
+t.remove();
+t.save( {a:[['c']]} );
+assert.eq( 1, t.find( {a:{$type:4}} ).hint( {a:1} ).itcount() );
+
+// Type RegEx
+t.remove();
+t.save( {a:/r/} );
+assert.eq( 1, t.find( {a:{$type:11}} ).hint( {a:1} ).itcount() );
+
+// Type jstNULL
+t.remove();
+assert.eq( [[null,null]], t.find( {a:{$type:10}} ).hint( {a:1} ).explain().indexBounds.a );
+
+// Type Undefined
+t.remove();
+// 'null' is the client friendly version of undefined.
+assert.eq( [[null,null]], t.find( {a:{$type:6}} ).hint( {a:1} ).explain().indexBounds.a );
+
+t.save( {a:undefined} );
+assert.eq( 1, t.find( {a:{$type:6}} ).hint( {a:1} ).itcount() );
+
+// This one won't be returned.
+t.save( {a:null} );
+assert.eq( 1, t.find( {a:{$type:6}} ).hint( {a:1} ).itcount() );
+
+t.remove();
+// Type MinKey
+assert.eq( [[{$minElement:1},{$minElement:1}]], t.find( {a:{$type:-1}} ).hint( {a:1} ).explain().indexBounds.a );
+// Type MaxKey
+assert.eq( [[{$maxElement:1},{$maxElement:1}]], t.find( {a:{$type:127}} ).hint( {a:1} ).explain().indexBounds.a );
+
+// Type Timestamp
+t.remove();
+t.save( {a:new Timestamp()} );
+assert.eq( 1, t.find( {a:{$type:17}} ).itcount() );
+if ( 0 ) { // SERVER-3304
+assert.eq( 0, t.find( {a:{$type:9}} ).itcount() );
+}
+
+// Type Date
+t.remove();
+t.save( {a:new Date()} );
+if ( 0 ) { // SERVER-3304
+assert.eq( 0, t.find( {a:{$type:17}} ).itcount() );
+}
+assert.eq( 1, t.find( {a:{$type:9}} ).itcount() );
+
+// Type Code
+t.remove();
+t.save( {a:function(){var a = 0;}} );
+assert.eq( 1, t.find( {a:{$type:13}} ).itcount() );
+
+// Type BinData
+t.remove();
+t.save( {a:new BinData(0,'')} );
+assert.eq( 1, t.find( {a:{$type:5}} ).itcount() );
diff --git a/jstests/unique2.js b/jstests/unique2.js
index 42cf9fb..1c28288 100644
--- a/jstests/unique2.js
+++ b/jstests/unique2.js
@@ -1,3 +1,11 @@
+// Test unique and dropDups index options.
+
+function checkNprev( np ) {
+ // getPrevError() is not available sharded.
+ if ( typeof( myShardingTest ) == 'undefined' ) {
+ assert.eq( np, db.getPrevError().nPrev );
+ }
+}
t = db.jstests_unique2;
@@ -21,7 +29,9 @@ t.ensureIndex({k:1}, {unique:true});
t.insert({k:3});
t.insert({k:[2,3]});
+assert( db.getLastError() );
t.insert({k:[4,3]});
+assert( db.getLastError() );
assert( t.count() == 1 ) ;
assert( t.find().sort({k:1}).toArray().length == 1 ) ;
@@ -33,9 +43,52 @@ t.insert({k:[2,3]});
t.insert({k:[4,3]});
assert( t.count() == 3 ) ;
+// Trigger an error, so we can test n of getPrevError() later.
+assert.throws( function() { t.find( {$where:'aaa'} ).itcount(); } );
+assert( db.getLastError() );
+checkNprev( 1 );
+
t.ensureIndex({k:1}, {unique:true, dropDups:true});
+// Check error flag was not set SERVER-2054.
+assert( !db.getLastError() );
+// Check that offset of previous error is correct.
+checkNprev( 2 );
+
+// Check the dups were dropped.
+assert( t.count() == 1 ) ;
+assert( t.find().sort({k:1}).toArray().length == 1 ) ;
+assert( t.find().sort({k:1}).count() == 1 ) ;
+
+// Check that a new conflicting insert will cause an error.
+t.insert({k:[2,3]});
+assert( db.getLastError() );
+
+t.drop();
+t.insert({k:3});
+t.insert({k:[2,3]});
+t.insert({k:[4,3]});
+assert( t.count() == 3 ) ;
+
+
+// Now try with a background index op.
+
+// Trigger an error, so we can test n of getPrevError() later.
+assert.throws( function() { t.find( {$where:'aaa'} ).itcount(); } );
+assert( db.getLastError() );
+checkNprev( 1 );
+
+t.ensureIndex({k:1}, {background:true, unique:true, dropDups:true});
+// Check error flag was not set SERVER-2054.
+assert( !db.getLastError() );
+// Check that offset of previous error is correct.
+checkNprev( 2 );
+
+// Check the dups were dropped.
assert( t.count() == 1 ) ;
assert( t.find().sort({k:1}).toArray().length == 1 ) ;
assert( t.find().sort({k:1}).count() == 1 ) ;
+// Check that a new conflicting insert will cause an error.
+t.insert({k:[2,3]});
+assert( db.getLastError() );
diff --git a/jstests/uniqueness.js b/jstests/uniqueness.js
index f1651b3..ce19ad0 100644
--- a/jstests/uniqueness.js
+++ b/jstests/uniqueness.js
@@ -26,8 +26,21 @@ db.jstests_uniqueness2.drop();
db.jstests_uniqueness2.insert({a:3});
db.jstests_uniqueness2.insert({a:3});
assert( db.jstests_uniqueness2.count() == 2 , 6) ;
+db.resetError();
db.jstests_uniqueness2.ensureIndex({a:1}, true);
assert( db.getLastError() , 7);
+assert( db.getLastError().match( /E11000/ ) );
+
+// Check for an error message when we index in the background and there are dups
+db.jstests_uniqueness2.drop();
+db.jstests_uniqueness2.insert({a:3});
+db.jstests_uniqueness2.insert({a:3});
+assert( db.jstests_uniqueness2.count() == 2 , 6) ;
+assert( !db.getLastError() );
+db.resetError();
+db.jstests_uniqueness2.ensureIndex({a:1}, {unique:true,background:true});
+assert( db.getLastError() , 7);
+assert( db.getLastError().match( /E11000/ ) );
/* Check that if we update and remove _id, it gets added back by the DB */
diff --git a/jstests/update.js b/jstests/update.js
index 70f9f15..d388918 100644
--- a/jstests/update.js
+++ b/jstests/update.js
@@ -23,3 +23,16 @@ for(var i=1; i<=5000; i++) {
}
assert(asdf.validate().valid);
+
+var stats = db.runCommand({ collstats: "asdf" });
+
+// some checks. want to check that padding factor is working; in addition this lets us do a little basic
+// testing of the collstats command at the same time
+assert(stats.count == 5000);
+assert(stats.size < 140433012 * 5 && stats.size > 1000000);
+assert(stats.numExtents < 20);
+assert(stats.nindexes == 1);
+var pf = stats.paddingFactor;
+print("update.js padding factor: " + pf);
+assert(pf > 1.7 && pf < 2);
+
diff --git a/jstests/update_blank1.js b/jstests/update_blank1.js
new file mode 100644
index 0000000..8742bd2
--- /dev/null
+++ b/jstests/update_blank1.js
@@ -0,0 +1,12 @@
+
+t = db.update_blank1
+t.drop();
+
+orig = { _id : 1 , "" : 1 , "a" : 2 , "b" : 3 };
+t.insert( orig );
+assert.eq( orig , t.findOne() , "A1" );
+
+t.update( {} , { $set : { "c" : 1 } } );
+print( db.getLastError() );
+orig["c"] = 1;
+//assert.eq( orig , t.findOne() , "A2" ); // SERVER-2651
diff --git a/jstests/update_invalid1.js b/jstests/update_invalid1.js
new file mode 100644
index 0000000..7c94507
--- /dev/null
+++ b/jstests/update_invalid1.js
@@ -0,0 +1,6 @@
+
+t = db.update_invalid1
+t.drop()
+
+t.update( { _id : 5 } , { $set : { $inc : { x : 5 } } } , true );
+assert.eq( 0 , t.count() , "A1" );
diff --git a/jstests/updatea.js b/jstests/updatea.js
index 9864aa6..5b45d60 100644
--- a/jstests/updatea.js
+++ b/jstests/updatea.js
@@ -47,4 +47,10 @@ t.update( {} , { $inc: { "a.10" : 1 } } );
orig.a[10]++;
+// SERVER-3218
+t.drop()
+t.insert({"a":{"c00":1}, 'c':2})
+t.update({"c":2}, {'$inc':{'a.c000':1}})
+
+assert.eq( { "c00" : 1 , "c000" : 1 } , t.findOne().a , "D1" )
diff --git a/jstests/updatef.js b/jstests/updatef.js
new file mode 100644
index 0000000..6942593
--- /dev/null
+++ b/jstests/updatef.js
@@ -0,0 +1,24 @@
+// Test unsafe management of nsdt on update command yield SERVER-3208
+
+prefixNS = db.jstests_updatef;
+prefixNS.save( {} );
+
+t = db.jstests_updatef_actual;
+t.drop();
+
+t.save( {a:0,b:[]} );
+for( i = 0; i < 1000; ++i ) {
+ t.save( {a:100} );
+}
+t.save( {a:0,b:[]} );
+
+db.getLastError();
+// Repeatedly rename jstests_updatef to jstests_updatef_ and back. This will
+// invalidate the jstests_updatef_actual NamespaceDetailsTransient object.
+s = startParallelShell( "for( i=0; i < 100; ++i ) { db.jstests_updatef.renameCollection( 'jstests_updatef_' ); db.jstests_updatef_.renameCollection( 'jstests_updatef' ); }" );
+
+for( i=0; i < 20; ++i ) {
+ t.update( {a:0}, {$push:{b:i}}, false, true );
+}
+
+s();
diff --git a/jstests/updateg.js b/jstests/updateg.js
new file mode 100644
index 0000000..f8d452f
--- /dev/null
+++ b/jstests/updateg.js
@@ -0,0 +1,17 @@
+// SERVER-3370 check modifiers with field name characters comparing less than '.' character.
+
+t = db.jstests_updateg;
+
+t.drop();
+t.update({}, { '$inc' : { 'all.t' : 1, 'all-copy.t' : 1 }}, true);
+assert.eq( 1, t.count( {all:{t:1},'all-copy':{t:1}} ) );
+
+t.drop();
+t.save({ 'all' : {}, 'all-copy' : {}});
+t.update({}, { '$inc' : { 'all.t' : 1, 'all-copy.t' : 1 }});
+assert.eq( 1, t.count( {all:{t:1},'all-copy':{t:1}} ) );
+
+t.drop();
+t.save({ 'all11' : {}, 'all2' : {}});
+t.update({}, { '$inc' : { 'all11.t' : 1, 'all2.t' : 1 }});
+assert.eq( 1, t.count( {all11:{t:1},'all2':{t:1}} ) );