From 5d342a758c6095b4d30aba0750b54f13b8916f51 Mon Sep 17 00:00:00 2001
From: Antonin Kral
Date: Wed, 14 Sep 2011 17:08:06 +0200
Subject: Imported Upstream version 2.0.0

---
 jstests/ageoutjournalfiles.js | 16 +
 jstests/and.js | 86 ++++
 jstests/and2.js | 27 ++
 jstests/and3.js | 66 +++
 jstests/andor.js | 105 +++++
 jstests/apitest_dbcollection.js | 2 +-
 jstests/array_match2.js | 25 ++
 jstests/array_match3.js | 13 +
 jstests/arrayfind2.js | 3 +-
 jstests/arrayfind4.js | 22 +
 jstests/arrayfind5.js | 23 +
 jstests/auth/auth1.js | 5 +
 jstests/auth/auth2.js | 23 +
 jstests/auth/rename.js | 40 ++
 jstests/auth1.js | 17 +
 jstests/auth2.js | 6 +-
 jstests/bench_test1.js | 16 +
 jstests/bench_test2.js | 41 ++
 jstests/big_object1.js | 2 +
 jstests/binData.js | 14 +
 jstests/capped.js | 8 +-
 jstests/capped2.js | 10 +-
 jstests/capped5.js | 7 +-
 jstests/capped6.js | 2 +-
 jstests/capped8.js | 40 +-
 jstests/capped9.js | 28 ++
 jstests/cappeda.js | 33 ++
 jstests/compact.js | 37 ++
 jstests/compact_speed_test.js | 61 +++
 jstests/date1.js | 5 +-
 jstests/date2.js | 13 +
 jstests/date3.js | 31 ++
 jstests/dbcase.js | 16 +-
 jstests/dbcase2.js | 9 +
 jstests/dbhash.js | 10 +-
 jstests/delx.js | 1 +
 jstests/disk/directoryperdb.js | 2 +
 jstests/disk/diskfull.js | 8 +-
 jstests/disk/newcollection.js | 20 +-
 jstests/disk/norepeat.js | 2 +-
 jstests/disk/quota.js | 47 ++
 jstests/disk/quota2.js | 38 ++
 jstests/disk/repair3.js | 2 +-
 jstests/disk/repair5.js | 43 ++
 jstests/distinct1.js | 1 +
 jstests/distinct_index1.js | 10 +
 jstests/drop2.js | 2 +-
 jstests/drop3.js | 29 ++
 jstests/dropdb.js | 17 +
 jstests/dropdb_race.js | 44 ++
 jstests/dur/closeall.js | 76 ++--
 jstests/dur/data/empty.bson | 0
 jstests/dur/diskfull.js | 51 +--
 jstests/dur/dropdb.js | 21 +-
 jstests/dur/dur1.js | 25 +-
 jstests/dur/dur1_tool.js | 152 +++++++
 jstests/dur/indexbg.js | 7 +
 jstests/dur/indexbg2.js | 19 +
 jstests/dur/manyRestart.js | 4 +
 jstests/eval_nolock.js | 2 +-
 jstests/evalb.js | 2 +-
 jstests/evalc.js | 11 +-
 jstests/evald.js | 10 +-
 jstests/exists3.js | 21 +
 jstests/exists4.js | 20 +
 jstests/exists5.js | 33 ++
 jstests/exists6.js | 63 +++
 jstests/exists7.js | 21 +
 jstests/exists8.js | 76 ++++
 jstests/exists9.js | 41 ++
 jstests/find8.js | 27 ++
 jstests/find_and_modify2.js | 6 +
 jstests/fsync.js | 17 +-
 jstests/geo10.js | 21 +
 jstests/geo4.js | 2 +-
 jstests/geo_array0.js | 25 ++
 jstests/geo_array1.js | 30 ++
 jstests/geo_array2.js | 163 +++++++
 jstests/geo_borders.js | 263 +++++------
 jstests/geo_center_sphere2.js | 158 +++++++
 jstests/geo_distinct.js | 16 +
 jstests/geo_fiddly_box.js | 44 ++
 jstests/geo_fiddly_box2.js | 32 ++
 jstests/geo_group.js | 35 ++
 jstests/geo_mapreduce.js | 56 +++
 jstests/geo_mapreduce2.js | 36 ++
 jstests/geo_multinest0.js | 63 +++
 jstests/geo_multinest1.js | 37 ++
 jstests/geo_oob_sphere.js | 42 ++
 jstests/geo_poly_edge.js | 22 +
 jstests/geo_poly_line.js | 17 +
 jstests/geo_polygon1.js | 74 ++++
 jstests/geo_polygon2.js | 266 +++++++++++
 jstests/geo_polygon3.js | 54 +++
 jstests/geo_regex0.js | 18 +
 jstests/geo_small_large.js | 151 +++++++
 jstests/geo_uniqueDocs.js | 38 ++
 jstests/getlog1.js | 24 +
 jstests/group7.js | 43 ++
 jstests/hint1.js | 12 +-
 jstests/idhack.js | 23 +
 jstests/in8.js | 23 +
 jstests/in9.js | 35 ++
 jstests/ina.js | 15 +
 jstests/index11.js | 30 +-
 jstests/index9.js | 8 +
 jstests/index_big1.js | 39 ++
 jstests/index_bigkeys.js | 78 ++++
 jstests/index_check5.js | 2 +-
 jstests/index_check8.js | 12 +-
 jstests/index_fornew.js | 13 -
 jstests/index_maxkey.js | 27 ++
 jstests/indexbindata.js | 0
 jstests/indexk.js | 58 +++
 jstests/indexl.js | 27 ++
 jstests/indexm.js | 38 ++
 jstests/indexn.js | 41 ++
 jstests/indexo.js | 32 ++
 jstests/indexp.js | 58 +++
 jstests/indexq.js | 14 +
 jstests/indexr.js | 47 ++
 jstests/indexs.js | 21 +
 jstests/indext.js | 21 +
 jstests/indexu.js | 137 ++++++
 jstests/indexv.js | 18 +
 jstests/indexw.js | 14 +
 jstests/insert1.js | 3 +
 jstests/libs/geo_near_random.js | 37 +-
 jstests/libs/key1 | 1 +
 jstests/libs/key2 | 1 +
 jstests/libs/testconfig | 4 +
 jstests/mr_errorhandling.js | 2 +
 jstests/mr_merge2.js | 37 ++
 jstests/numberint.js | 92 ++++
 jstests/numberlong2.js | 32 ++
 jstests/numberlong3.js | 25 ++
 jstests/or1.js | 2 +-
 jstests/or2.js | 3 +-
 jstests/or3.js | 4 +-
 jstests/or4.js | 2 +-
 jstests/ord.js | 1 +
 jstests/org.js | 19 +
 jstests/orh.js | 17 +
 jstests/ori.js | 48 ++
 jstests/orj.js | 121 +++++
 jstests/ork.js | 11 +
 jstests/orl.js | 13 +
 jstests/orm.js | 29 ++
 jstests/orn.js | 22 +
 jstests/profile1.js | 144 ++++--
 jstests/profile2.js | 19 +
 jstests/profile3.js | 26 ++
 jstests/push.js | 36 +-
 jstests/query1.js | 3 +
 jstests/regex2.js | 8 +
 jstests/regex6.js | 11 +-
 jstests/regexa.js | 19 +
 jstests/remove10.js | 28 ++
 jstests/remove2.js | 5 +
 jstests/remove9.js | 16 +
 jstests/rename.js | 19 +-
 jstests/repl/basic1.js | 19 +-
 jstests/repl/dbcase.js | 95 ++++
 jstests/repl/drop_dups.js | 68 +++
 jstests/repl/mastermaster1.js | 23 +-
 jstests/repl/mod_move.js | 69 +++
 jstests/repl/pair1.js | 100 -----
 jstests/repl/pair2.js | 71 ---
 jstests/repl/pair3.js | 245 -----------
 jstests/repl/pair4.js | 160 -------
 jstests/repl/pair5.js | 95 ----
 jstests/repl/pair6.js | 115 -----
 jstests/repl/pair7.js | 85 ----
 jstests/repl/repl2.js | 29 +-
 jstests/repl/repl3.js | 58 +--
 jstests/repl/replacePeer1.js | 82 ----
 jstests/repl/replacePeer2.js | 86 ----
 jstests/repl/snapshot2.js | 72 ---
 jstests/repl/snapshot3.js | 53 ---
 jstests/replsets/auth1.js | 35 +-
 jstests/replsets/cloneDb.js | 18 +-
 jstests/replsets/config1.js | 21 -
 jstests/replsets/downstream.js | 36 ++
 jstests/replsets/fastsync.js | 151 +++--
 jstests/replsets/initial_sync1.js | 5 +-
 jstests/replsets/initial_sync3.js | 37 +-
 jstests/replsets/key1 | 1 -
 jstests/replsets/key2 | 1 -
 jstests/replsets/maintenance.js | 32 ++
 jstests/replsets/majority.js | 60 +++
 jstests/replsets/randomcommands1.js | 29 --
 jstests/replsets/reconfig.js | 69 +++
 jstests/replsets/remove1.js | 130 +++---
 jstests/replsets/replset1.js | 22 +
 jstests/replsets/replset3.js | 2 +-
 jstests/replsets/replset5.js | 88 ++--
 jstests/replsets/replsetadd.js | 34 +-
 jstests/replsets/replsetarb1.js | 33 --
 jstests/replsets/replsetarb2.js | 13 +-
 jstests/replsets/replsetarb3.js | 144 ------
 jstests/replsets/replsetfreeze.js | 4 +-
 jstests/replsets/replsetrestart1.js | 14 +-
 jstests/replsets/replsetrestart2.js | 8 +-
 jstests/replsets/rollback2.js | 19 +-
 jstests/replsets/rollback4.js | 117 +++++
 jstests/replsets/rslib.js | 44 +-
 jstests/replsets/slavedelay1.js | 104 +++--
 jstests/replsets/stale_clustered.js | 101 +++++
 jstests/replsets/stepdown.js | 142 ++++++
 jstests/replsets/stepdown2.js | 139 ++++++
 jstests/replsets/sync1.js | 396 +++++++++--------
 jstests/replsets/sync2.js | 48 ++
 jstests/replsets/tags.js | 154 +++++++
 jstests/replsets/tags2.js | 44 ++
 jstests/replsets/toostale.js | 34 +-
 jstests/replsets/twosets.js | 35 --
 jstests/set7.js | 16 +
 jstests/sharding/addshard1.js | 2 +-
 jstests/sharding/addshard4.js | 26 +-
 jstests/sharding/array_shard_key.js | 127 ++++++
 jstests/sharding/auth.js | 177 ++++++++
 jstests/sharding/count_slaveok.js | 69 +++
 jstests/sharding/drop_sharded_db.js | 62 +++
 jstests/sharding/features2.js | 11 +-
 jstests/sharding/features3.js | 61 ++-
 jstests/sharding/group_slaveok.js | 68 +++
 jstests/sharding/index1.js | 174 ++++++++
 jstests/sharding/migrateBig.js | 2 +-
 jstests/sharding/migrateMemory.js | 54 +++
 jstests/sharding/multi_mongos1.js | 3 +-
 jstests/sharding/multi_mongos2.js | 61 +++
 jstests/sharding/parallel.js | 38 ++
 jstests/sharding/shard3.js | 12 +-
 jstests/sharding/shard6.js | 3 +
 jstests/sharding/shard_insert_getlasterror_w2.js | 3 +-
 jstests/sharding/shard_keycount.js | 45 ++
 jstests/sharding/sharding_with_keyfile.js | 69 +++
 jstests/sharding/sharding_with_keyfile.key | 3 +
 jstests/sharding/sync6.js | 81 ++++
 jstests/sharding/sync7.js | 63 +++
 jstests/shell1.js | 6 +
 jstests/shellkillop.js | 126 +++---
 jstests/shellspawn.js | 6 +-
 jstests/skip1.js | 15 +
 jstests/slowNightly/background.js | 51 +++
 jstests/slowNightly/command_line_parsing.js | 12 +
 jstests/slowNightly/dur_big_atomic_update.js | 17 +
 jstests/slowNightly/dur_remove_old_journals.js | 27 +-
 jstests/slowNightly/geo_axis_aligned.js | 108 +++++
 jstests/slowNightly/geo_mnypts.js | 51 +++
 jstests/slowNightly/geo_polygon.js | 53 +++
 jstests/slowNightly/index_check10.js | 133 ++++++
 jstests/slowNightly/index_check9.js | 2 +
 jstests/slowNightly/replReads.js | 108 +++++
 jstests/slowNightly/replsets_priority1.js | 173 ++++++++
 jstests/slowNightly/sharding_balance1.js | 3 +-
 jstests/slowNightly/sharding_balance4.js | 8 +-
 jstests/slowNightly/sharding_migrateBigObject.js | 61 +++
 jstests/slowNightly/sharding_multiple_ns_rs.js | 49 +++
 jstests/slowNightly/sharding_passthrough.js | 16 +-
 jstests/slowNightly/sharding_rs1.js | 8 +-
 jstests/slowNightly/sharding_rs2.js | 22 +
 jstests/slowNightly/sharding_rs_arb1.js | 40 ++
 jstests/slowNightly/sync6_slow.js | 82 ++++
 jstests/slowWeekly/geo_full.js | 487 +++++++++++++++++++++
 jstests/slowWeekly/geo_mnypts_plus_fields.js | 98 +++++
 jstests/slowWeekly/query_yield2.js | 2 +-
 jstests/slowWeekly/repair2.js | 29 ++
 jstests/slowWeekly/update_yield1.js | 2 +-
 jstests/sort10.js | 48 ++
 jstests/sort2.js | 22 +-
 jstests/sort7.js | 25 ++
 jstests/sort8.js | 30 ++
 jstests/sort9.js | 26 ++
 jstests/sorta.js | 26 ++
 jstests/tool/csv1.js | 8 +-
 jstests/tool/csvexport1.js | 45 ++
 jstests/tool/csvexport2.js | 31 ++
 jstests/tool/csvimport1.js | 40 ++
 jstests/tool/data/a.tsv | 2 +
 jstests/tool/data/csvimport1.csv | 8 +
 jstests/tool/data/dumprestore6/foo.bson | Bin 0 -> 44 bytes
 jstests/tool/data/dumprestore6/system.indexes.bson | Bin 0 -> 144 bytes
 jstests/tool/dumprestore5.js | 36 ++
 jstests/tool/dumprestore6.js | 27 ++
 jstests/tool/exportimport1.js | 29 +-
 jstests/tool/tsv1.js | 32 ++
 jstests/type2.js | 19 +
 jstests/type3.js | 68 +++
 jstests/unique2.js | 53 +++
 jstests/uniqueness.js | 13 +
 jstests/update.js | 13 +
 jstests/update_blank1.js | 12 +
 jstests/update_invalid1.js | 6 +
 jstests/updatea.js | 6 +
 jstests/updatef.js | 24 +
 jstests/updateg.js | 17 +
 297 files changed, 10431 insertions(+), 2415 deletions(-)
 create mode 100644 jstests/ageoutjournalfiles.js
 create mode 100644 jstests/and.js
 create mode 100644 jstests/and2.js
 create mode 100644 jstests/and3.js
 create mode 100644 jstests/andor.js
 create mode 100644 jstests/array_match2.js
 create mode 100644 jstests/array_match3.js
 create mode 100644 jstests/arrayfind4.js
 create mode 100644 jstests/arrayfind5.js
 create mode 100644 jstests/auth/auth2.js
 create mode 100644 jstests/auth/rename.js
 create mode 100644 jstests/bench_test1.js
 create mode 100644 jstests/bench_test2.js
 create mode 100644 jstests/binData.js
 create mode 100644 jstests/capped9.js
 create mode 100644 jstests/cappeda.js
 create mode 100644 jstests/compact.js
 create mode 100755 jstests/compact_speed_test.js
 create mode 100644 jstests/date2.js
 create mode 100644 jstests/date3.js
 create mode 100644 jstests/dbcase2.js
 create mode 100644 jstests/disk/quota.js
 create mode 100644 jstests/disk/quota2.js
 create mode 100644 jstests/disk/repair5.js
 create mode 100644 jstests/drop3.js
 create mode 100644 jstests/dropdb.js
 create mode 100644 jstests/dropdb_race.js
 create mode 100644 jstests/dur/data/empty.bson
 create mode 100755 jstests/dur/dur1_tool.js
 create mode 100644 jstests/dur/indexbg.js
 create mode 100644 jstests/dur/indexbg2.js
 create mode 100644 jstests/exists3.js
 create mode 100644 jstests/exists4.js
 create mode 100644 jstests/exists5.js
 create mode 100644 jstests/exists6.js
 create mode 100644 jstests/exists7.js
 create mode 100644 jstests/exists8.js
 create mode 100644 jstests/exists9.js
 create mode 100644 jstests/find8.js
 create mode 100644 jstests/geo10.js
 create mode 100644 jstests/geo_array0.js
 create mode 100644 jstests/geo_array1.js
 create mode 100644 jstests/geo_array2.js
 create mode 100644 jstests/geo_center_sphere2.js
 create mode 100644 jstests/geo_distinct.js
 create mode 100644 jstests/geo_fiddly_box.js
 create mode 100644 jstests/geo_fiddly_box2.js
 create mode 100644 jstests/geo_group.js
 create mode 100644 jstests/geo_mapreduce.js
 create mode 100644 jstests/geo_mapreduce2.js
 create mode 100644 jstests/geo_multinest0.js
 create mode 100644 jstests/geo_multinest1.js
 create mode 100644 jstests/geo_oob_sphere.js
 create mode 100644 jstests/geo_poly_edge.js
 create mode 100644 jstests/geo_poly_line.js
 create mode 100644 jstests/geo_polygon1.js
 create mode 100644 jstests/geo_polygon2.js
 create mode 100644 jstests/geo_polygon3.js
 create mode 100644 jstests/geo_regex0.js
 create mode 100644 jstests/geo_small_large.js
 create mode 100644 jstests/geo_uniqueDocs.js
 create mode 100644 jstests/getlog1.js
 create mode 100644 jstests/group7.js
 create mode 100644 jstests/idhack.js
 create mode 100644 jstests/in8.js
 create mode 100644 jstests/in9.js
 create mode 100644 jstests/ina.js
 create mode 100644 jstests/index_big1.js
 create mode 100755 jstests/index_bigkeys.js
 delete mode 100644 jstests/index_fornew.js
 create mode 100644 jstests/index_maxkey.js
 create mode 100755 jstests/indexbindata.js
 create mode 100644 jstests/indexk.js
 create mode 100644 jstests/indexl.js
 create mode 100644 jstests/indexm.js
 create mode 100644 jstests/indexn.js
 create mode 100644 jstests/indexo.js
 create mode 100644 jstests/indexp.js
 create mode 100644 jstests/indexq.js
 create mode 100644 jstests/indexr.js
 create mode 100644 jstests/indexs.js
 create mode 100644 jstests/indext.js
 create mode 100644 jstests/indexu.js
 create mode 100644 jstests/indexv.js
 create mode 100644 jstests/indexw.js
 create mode 100644 jstests/libs/key1
 create mode 100644 jstests/libs/key2
 create mode 100644 jstests/libs/testconfig
 create mode 100644 jstests/mr_merge2.js
 create mode 100644 jstests/numberint.js
 create mode 100644 jstests/numberlong2.js
 create mode 100644 jstests/numberlong3.js
 create mode 100644 jstests/org.js
 create mode 100644 jstests/orh.js
 create mode 100644 jstests/ori.js
 create mode 100644 jstests/orj.js
 create mode 100644 jstests/ork.js
 create mode 100644 jstests/orl.js
 create mode 100644 jstests/orm.js
 create mode 100644 jstests/orn.js
 create mode 100644 jstests/profile2.js
 create mode 100644 jstests/profile3.js
 create mode 100644 jstests/regexa.js
 create mode 100644 jstests/remove10.js
 create mode 100644 jstests/remove9.js
 create mode 100644 jstests/repl/dbcase.js
 create mode 100644 jstests/repl/drop_dups.js
 create mode 100644 jstests/repl/mod_move.js
 delete mode 100644 jstests/repl/pair1.js
 delete mode 100644 jstests/repl/pair2.js
 delete mode 100644 jstests/repl/pair3.js
 delete mode 100644 jstests/repl/pair4.js
 delete mode 100644 jstests/repl/pair5.js
 delete mode 100644 jstests/repl/pair6.js
 delete mode 100644 jstests/repl/pair7.js
 delete mode 100644 jstests/repl/replacePeer1.js
 delete mode 100644 jstests/repl/replacePeer2.js
 delete mode 100644 jstests/repl/snapshot2.js
 delete mode 100644 jstests/repl/snapshot3.js
 delete mode 100644 jstests/replsets/config1.js
 create mode 100755 jstests/replsets/downstream.js
 delete mode 100644 jstests/replsets/key1
 delete mode 100644 jstests/replsets/key2
 create mode 100644 jstests/replsets/maintenance.js
 create mode 100644 jstests/replsets/majority.js
 delete mode 100644 jstests/replsets/randomcommands1.js
 create mode 100644 jstests/replsets/reconfig.js
 delete mode 100644 jstests/replsets/replsetarb1.js
 delete mode 100644 jstests/replsets/replsetarb3.js
 create mode 100644 jstests/replsets/rollback4.js
 create mode 100644 jstests/replsets/stale_clustered.js
 create mode 100644 jstests/replsets/stepdown.js
 create mode 100755 jstests/replsets/stepdown2.js
 create mode 100644 jstests/replsets/sync2.js
 create mode 100644 jstests/replsets/tags.js
 create mode 100644 jstests/replsets/tags2.js
 delete mode 100644 jstests/replsets/twosets.js
 create mode 100644 jstests/sharding/array_shard_key.js
 create mode 100644 jstests/sharding/auth.js
 create mode 100644 jstests/sharding/count_slaveok.js
 create mode 100644 jstests/sharding/drop_sharded_db.js
 create mode 100644 jstests/sharding/group_slaveok.js
 create mode 100644 jstests/sharding/index1.js
 create mode 100644 jstests/sharding/migrateMemory.js
 create mode 100644 jstests/sharding/multi_mongos2.js
 create mode 100644 jstests/sharding/parallel.js
 create mode 100644 jstests/sharding/shard_keycount.js
 create mode 100644 jstests/sharding/sharding_with_keyfile.js
 create mode 100755 jstests/sharding/sharding_with_keyfile.key
 create mode 100644 jstests/sharding/sync6.js
 create mode 100644 jstests/sharding/sync7.js
 create mode 100644 jstests/shell1.js
 create mode 100644 jstests/skip1.js
 create mode 100644 jstests/slowNightly/background.js
 create mode 100644 jstests/slowNightly/geo_axis_aligned.js
 create mode 100644 jstests/slowNightly/geo_mnypts.js
 create mode 100644 jstests/slowNightly/geo_polygon.js
 create mode 100644 jstests/slowNightly/index_check10.js
 create mode 100644 jstests/slowNightly/replReads.js
 create mode 100644 jstests/slowNightly/replsets_priority1.js
 create mode 100644 jstests/slowNightly/sharding_migrateBigObject.js
 create mode 100644 jstests/slowNightly/sharding_multiple_ns_rs.js
 create mode 100644 jstests/slowNightly/sharding_rs_arb1.js
 create mode 100644 jstests/slowNightly/sync6_slow.js
 create mode 100644 jstests/slowWeekly/geo_full.js
 create mode 100644 jstests/slowWeekly/geo_mnypts_plus_fields.js
 create mode 100644 jstests/slowWeekly/repair2.js
 create mode 100644 jstests/sort10.js
 create mode 100644 jstests/sort7.js
 create mode 100644 jstests/sort8.js
 create mode 100644 jstests/sort9.js
 create mode 100644 jstests/sorta.js
 create mode 100644 jstests/tool/csvexport1.js
 create mode 100644 jstests/tool/csvexport2.js
 create mode 100644 jstests/tool/csvimport1.js
 create mode 100644 jstests/tool/data/a.tsv
 create mode 100644 jstests/tool/data/csvimport1.csv
 create mode 100644 jstests/tool/data/dumprestore6/foo.bson
 create mode 100644 jstests/tool/data/dumprestore6/system.indexes.bson
 create mode 100644 jstests/tool/dumprestore5.js
 create mode 100644 jstests/tool/dumprestore6.js
 create mode 100644 jstests/tool/tsv1.js
 create mode 100644 jstests/type2.js
 create mode 100644 jstests/type3.js
 create mode 100644 jstests/update_blank1.js
 create mode 100644 jstests/update_invalid1.js
 create mode 100644 jstests/updatef.js
 create mode 100644 jstests/updateg.js

diff --git a/jstests/ageoutjournalfiles.js b/jstests/ageoutjournalfiles.js
new file mode 100644
index 0000000..3c12cd8
--- /dev/null
+++ b/jstests/ageoutjournalfiles.js
@@ -0,0 +1,16 @@
+if (false && db.serverStatus().dur) {
+
+    assert(db.serverStatus().dur.ageOutJournalFiles != false);
+
+    db.adminCommand({ setParameter: 1, ageOutJournalFiles: false });
+
+    assert(db.serverStatus().dur.ageOutJournalFiles == false);
+
+    db.adminCommand({ setParameter: 1, ageOutJournalFiles: true });
+
+    assert(db.serverStatus().dur.ageOutJournalFiles != false);
+
+}
+else {
+// print("dur is off");
+}
\ No newline at end of file
diff --git a/jstests/and.js b/jstests/and.js
new file mode 100644
index 0000000..bd6dbcd
--- /dev/null
+++ b/jstests/and.js
@@ -0,0 +1,86 @@
+// Some tests for $and SERVER-1089
+
+t = db.jstests_and;
+t.drop();
+
+t.save( {a:[1,2]} );
+t.save( {a:'foo'} );
+
+function check() {
+    // $and must be an array
+    assert.throws( function() { t.find( {$and:4} ).toArray() } );
+    // $and array must not be empty
+    assert.throws( function() { t.find( {$and:[]} ).toArray() } );
+    // $and elements must be objects
+    assert.throws( function() { t.find( {$and:[4]} ).toArray() } );
+
+    // Check equality matching
+    assert.eq( 1, t.count( {$and:[{a:1}]} ) );
+    assert.eq( 1, t.count( {$and:[{a:1},{a:2}]} ) );
+    assert.eq( 0, t.count( {$and:[{a:1},{a:3}]} ) );
+    assert.eq( 0, t.count( {$and:[{a:1},{a:2},{a:3}]} ) );
+    assert.eq( 1, t.count( {$and:[{a:'foo'}]} ) );
+    assert.eq( 0, t.count( {$and:[{a:'foo'},{a:'g'}]} ) );
+
+    // Check $and with other fields
+    assert.eq( 1, t.count( {a:2,$and:[{a:1}]} ) );
+    assert.eq( 0, t.count( {a:0,$and:[{a:1}]} ) );
+    assert.eq( 0, t.count( {a:2,$and:[{a:0}]} ) );
+    assert.eq( 1, t.count( {a:1,$and:[{a:1}]} ) );
+
+    // Check recursive $and
+    assert.eq( 1, t.count( {a:2,$and:[{$and:[{a:1}]}]} ) );
+    assert.eq( 0, t.count( {a:0,$and:[{$and:[{a:1}]}]} ) );
+    assert.eq( 0, t.count( {a:2,$and:[{$and:[{a:0}]}]} ) );
+    assert.eq( 1, t.count( {a:1,$and:[{$and:[{a:1}]}]} ) );
+
+    assert.eq( 1, t.count( {$and:[{a:2},{$and:[{a:1}]}]} ) );
+    assert.eq( 0, t.count( {$and:[{a:0},{$and:[{a:1}]}]} ) );
+    assert.eq( 0, t.count( {$and:[{a:2},{$and:[{a:0}]}]} ) );
+    assert.eq( 1, t.count( {$and:[{a:1},{$and:[{a:1}]}]} ) );
+
+    // Some of these cases were more important with an alternative $and syntax
+    // that was rejected, but they're still valid checks.
+
+    // Check simple regex
+    assert.eq( 1, t.count( {$and:[{a:/foo/}]} ) );
+    // Check multiple regexes
+    assert.eq( 1, t.count( {$and:[{a:/foo/},{a:/^f/},{a:/o/}]} ) );
+    assert.eq( 0, t.count( {$and:[{a:/foo/},{a:/^g/}]} ) );
+    assert.eq( 1, t.count( {$and:[{a:/^f/},{a:'foo'}]} ) );
+    // Check regex flags
+    assert.eq( 0, t.count( {$and:[{a:/^F/},{a:'foo'}]} ) );
+    assert.eq( 1, t.count( {$and:[{a:/^F/i},{a:'foo'}]} ) );
+
+
+
+    // Check operator
+    assert.eq( 1, t.count( {$and:[{a:{$gt:0}}]} ) );
+
+    // Check where
+    assert.eq( 1, t.count( {a:'foo',$where:'this.a=="foo"'} ) );
+    assert.eq( 1, t.count( {$and:[{a:'foo'}],$where:'this.a=="foo"'} ) );
+    assert.eq( 1, t.count( {$and:[{a:'foo'}],$where:'this.a=="foo"'} ) );
+
+    // Nested where ok
+    assert.eq( 1, t.count({$and:[{$where:'this.a=="foo"'}]}) );
+    assert.eq( 1, t.count({$and:[{a:'foo'},{$where:'this.a=="foo"'}]}) );
+    assert.eq( 1, t.count({$and:[{$where:'this.a=="foo"'}],$where:'this.a=="foo"'}) );
+}
+
+check();
+t.ensureIndex( {a:1} );
+check();
+var e = t.find( {$and:[{a:1}]} ).explain();
+assert.eq( 'BtreeCursor a_1', e.cursor );
+assert.eq( [[1,1]], e.indexBounds.a );
+
+function checkBounds( query ) {
+    var e = t.find( query ).explain();
+    assert.eq( 1, e.n );
+    assert.eq( [[1,1]], e.indexBounds.a );
+}
+
+// Since this is a multikey index, we get the bounds from the first constraint scanned.
+checkBounds( {a:1,$and:[{a:2}]} );
+checkBounds( {$and:[{a:1},{a:2}]} );
diff --git a/jstests/and2.js b/jstests/and2.js
new file mode 100644
index 0000000..0bd13eb
--- /dev/null
+++ b/jstests/and2.js
@@ -0,0 +1,27 @@
+// Test dollar sign operator with $and SERVER-1089
+
+t = db.jstests_and2;
+
+t.drop();
+t.save( {a:[1,2]} );
+t.update( {a:1}, {$set:{'a.$':5}} );
+assert.eq( [5,2], t.findOne().a );
+
+t.drop();
+t.save( {a:[1,2]} );
+t.update( {$and:[{a:1}]}, {$set:{'a.$':5}} );
+assert.eq( [5,2], t.findOne().a );
+
+// Make sure dollar sign operator with $and is consistent with no $and case
+t.drop();
+t.save( {a:[1,2],b:[3,4]} );
+t.update( {a:1,b:4}, {$set:{'a.$':5}} );
+// Probably not what we want here, just trying to make sure $and is consistent
+assert.eq( {a:[1,5],b:[3,4]}, t.find( {}, {_id:0} ).toArray()[ 0 ] );
+
+// Make sure dollar sign operator with $and is consistent with no $and case
+t.drop();
+t.save( {a:[1,2],b:[3,4]} );
+t.update( {a:1,$and:[{b:4}]}, {$set:{'a.$':5}} );
+// Probably not what we want here, just trying to make sure $and is consistent
+assert.eq( {a:[1,5],b:[3,4]}, t.find( {}, {_id:0} ).toArray()[ 0 ] );
diff --git a/jstests/and3.js b/jstests/and3.js
new file mode 100644
index 0000000..98a0974
--- /dev/null
+++ b/jstests/and3.js
@@ -0,0 +1,66 @@
+// Check key match with sub matchers - part of SERVER-3192
+
+t = db.jstests_and3;
+t.drop();
+
+t.save( {a:1} );
+t.save( {a:'foo'} );
+
+t.ensureIndex( {a:1} );
+
+function checkScanMatch( query, nscannedObjects, n ) {
+    var e = t.find( query ).hint( {a:1} ).explain();
+    // NOTE The nscannedObjects values aren't necessarily optimal currently,
+    // we're just checking current behavior here.
+    assert.eq( nscannedObjects, e.nscannedObjects );
+    assert.eq( n, e.n );
+}
+
+checkScanMatch( {a:/o/}, 1, 1 );
+checkScanMatch( {a:/a/}, 0, 0 );
+checkScanMatch( {a:{$not:/o/}}, 2, 1 );
+checkScanMatch( {a:{$not:/a/}}, 2, 2 );
+
+checkScanMatch( {$and:[{a:/o/}]}, 1, 1 );
+checkScanMatch( {$and:[{a:/a/}]}, 0, 0 );
+checkScanMatch( {$and:[{a:{$not:/o/}}]}, 2, 1 );
+checkScanMatch( {$and:[{a:{$not:/a/}}]}, 2, 2 );
+checkScanMatch( {$and:[{a:/o/},{a:{$not:/o/}}]}, 1, 0 );
+checkScanMatch( {$and:[{a:/o/},{a:{$not:/a/}}]}, 1, 1 );
+checkScanMatch( {$or:[{a:/o/}]}, 1, 1 );
+checkScanMatch( {$or:[{a:/a/}]}, 0, 0 );
+checkScanMatch( {$nor:[{a:/o/}]}, 2, 1 );
+checkScanMatch( {$nor:[{a:/a/}]}, 2, 2 );
+
+checkScanMatch( {$and:[{$and:[{a:/o/}]}]}, 1, 1 );
+checkScanMatch( {$and:[{$and:[{a:/a/}]}]}, 0, 0 );
+checkScanMatch( {$and:[{$and:[{a:{$not:/o/}}]}]}, 2, 1 );
+checkScanMatch( {$and:[{$and:[{a:{$not:/a/}}]}]}, 2, 2 );
+checkScanMatch( {$and:[{$or:[{a:/o/}]}]}, 1, 1 );
+checkScanMatch( {$and:[{$or:[{a:/a/}]}]}, 0, 0 );
+checkScanMatch( {$or:[{a:{$not:/o/}}]}, 2, 1 );
+checkScanMatch( {$and:[{$or:[{a:{$not:/o/}}]}]}, 2, 1 );
+checkScanMatch( {$and:[{$or:[{a:{$not:/a/}}]}]}, 2, 2 );
+checkScanMatch( {$and:[{$nor:[{a:/o/}]}]}, 2, 1 );
+checkScanMatch( {$and:[{$nor:[{a:/a/}]}]}, 2, 2 );
+
+checkScanMatch( {$where:'this.a==1'}, 2, 1 );
+checkScanMatch( {$and:[{$where:'this.a==1'}]}, 2, 1 );
+
+checkScanMatch( {a:1,$where:'this.a==1'}, 1, 1 );
+checkScanMatch( {a:1,$and:[{$where:'this.a==1'}]}, 1, 1 );
+checkScanMatch( {$and:[{a:1},{$where:'this.a==1'}]}, 1, 1 );
+checkScanMatch( {$and:[{a:1,$where:'this.a==1'}]}, 1, 1 );
+checkScanMatch( {a:1,$and:[{a:1},{a:1,$where:'this.a==1'}]}, 1, 1 );
+
+function checkImpossibleMatch( query ) {
+    var e = t.find( query ).explain();
+    assert.eq( 0, e.n );
+    assert.eq( 'BasicCursor', e.cursor );
+}
+
+// With a single key index, all bounds are utilized.
+assert.eq( [[1,1]], t.find( {$and:[{a:1}]} ).explain().indexBounds.a );
+assert.eq( [[1,1]], t.find( {a:1,$and:[{a:1}]} ).explain().indexBounds.a );
+checkImpossibleMatch( {a:1,$and:[{a:2}]} );
+checkImpossibleMatch( {$and:[{a:1},{a:2}]} );
diff --git a/jstests/andor.js b/jstests/andor.js
new file mode 100644
index 0000000..fae6ee4
--- /dev/null
+++ b/jstests/andor.js
@@ -0,0 +1,105 @@
+// SERVER-1089 Test and/or nesting
+
+t = db.jstests_andor;
+t.drop();
+
+// not ok
+function ok( q ) {
+    assert.eq( 1, t.find( q ).itcount() );
+}
+
+t.save( {a:1} );
+
+test = function() {
+
+    ok( {a:1} );
+
+    ok( {$and:[{a:1}]} );
+    ok( {$or:[{a:1}]} );
+
+    ok( {$and:[{$and:[{a:1}]}]} );
+    ok( {$or:[{$or:[{a:1}]}]} );
+
+    ok( {$and:[{$or:[{a:1}]}]} );
+    ok( {$or:[{$and:[{a:1}]}]} );
+
+    ok( {$and:[{$and:[{$or:[{a:1}]}]}]} );
+    ok( {$and:[{$or:[{$and:[{a:1}]}]}]} );
+    ok( {$or:[{$and:[{$and:[{a:1}]}]}]} );
+
+    ok( {$or:[{$and:[{$or:[{a:1}]}]}]} );
+
+    // now test $nor
+
+    ok( {$and:[{a:1}]} );
+    ok( {$nor:[{a:2}]} );
+
+    ok( {$and:[{$and:[{a:1}]}]} );
+    ok( {$nor:[{$nor:[{a:1}]}]} );
+
+    ok( {$and:[{$nor:[{a:2}]}]} );
+    ok( {$nor:[{$and:[{a:2}]}]} );
+
+    ok( {$and:[{$and:[{$nor:[{a:2}]}]}]} );
+    ok( {$and:[{$nor:[{$and:[{a:2}]}]}]} );
+    ok( {$nor:[{$and:[{$and:[{a:2}]}]}]} );
+
+    ok( {$nor:[{$and:[{$nor:[{a:1}]}]}]} );
+
+}
+
+test();
+t.ensureIndex( {a:1} );
+test();
+
+// Test an inequality-based match.
+
+test = function() {
+
+    ok( {a:{$ne:2}} );
+
+    ok( {$and:[{a:{$ne:2}}]} );
+    ok( {$or:[{a:{$ne:2}}]} );
+
+    ok( {$and:[{$and:[{a:{$ne:2}}]}]} );
+    ok( {$or:[{$or:[{a:{$ne:2}}]}]} );
+
+    ok( {$and:[{$or:[{a:{$ne:2}}]}]} );
+    ok( {$or:[{$and:[{a:{$ne:2}}]}]} );
+
+    ok( {$and:[{$and:[{$or:[{a:{$ne:2}}]}]}]} );
+    ok( {$and:[{$or:[{$and:[{a:{$ne:2}}]}]}]} );
+    ok( {$or:[{$and:[{$and:[{a:{$ne:2}}]}]}]} );
+
+    ok( {$or:[{$and:[{$or:[{a:{$ne:2}}]}]}]} );
+
+    // now test $nor
+
+    ok( {$and:[{a:{$ne:2}}]} );
+    ok( {$nor:[{a:{$ne:1}}]} );
+
+    ok( {$and:[{$and:[{a:{$ne:2}}]}]} );
+    ok( {$nor:[{$nor:[{a:{$ne:2}}]}]} );
+
+    ok( {$and:[{$nor:[{a:{$ne:1}}]}]} );
+    ok( {$nor:[{$and:[{a:{$ne:1}}]}]} );
+
+    ok( {$and:[{$and:[{$nor:[{a:{$ne:1}}]}]}]} );
+    ok( {$and:[{$nor:[{$and:[{a:{$ne:1}}]}]}]} );
+    ok( {$nor:[{$and:[{$and:[{a:{$ne:1}}]}]}]} );
+
+    ok( {$nor:[{$and:[{$nor:[{a:{$ne:2}}]}]}]} );
+
+}
+
+t.drop();
+t.save( {a:1} );
+test();
+t.ensureIndex( {a:1} );
+test();
+
+t.drop();
+t.ensureIndex( {a:1} );
+var e = t.find( {$and:[{a:1}]} ).explain();
+// nested $or clauses currently ignored for indexing
+assert.eq( e.indexBounds, t.find( {$and:[{a:1,$or:[{a:2}]}]} ).explain().indexBounds );
diff --git a/jstests/apitest_dbcollection.js b/jstests/apitest_dbcollection.js
index f6e74da..0983b06 100644
--- a/jstests/apitest_dbcollection.js
+++ b/jstests/apitest_dbcollection.js
@@ -55,7 +55,7 @@ if( v.ns != "test.test_db" ) {
 assert (v.ns == "test.test_db",9);
 assert (v.ok == 1,10);
 
-assert(v.result.toString().match(/nrecords\?:(\d+)/)[1] == 100,11);
+assert.eq(100,v.nrecords,11)
 
 /*
  * test deleteIndex, deleteIndexes
diff --git a/jstests/array_match2.js b/jstests/array_match2.js
new file mode 100644
index 0000000..d64ca1b
--- /dev/null
+++ b/jstests/array_match2.js
@@ -0,0 +1,25 @@
+// Different recursive array match cases SERVER-2898
+
+t = db.jstests_array_match2;
+t.drop();
+
+t.save( {a:[{1:4},5]} );
+// When the array index is the last field, both of these match types work.
+assert.eq( 1, t.count( {'a.1':4} ) );
+assert.eq( 1, t.count( {'a.1':5} ) );
+
+t.remove();
+// When the array index is not the last field, only one of the match types works.
+t.save( {a:[{1:{foo:4}},{foo:5}]} );
+if ( 0 ) { // SERVER-2898
+assert.eq( 1, t.count( {'a.1.foo':4} ) );
+}
+assert.eq( 1, t.count( {'a.1.foo':5} ) );
+
+// Same issue with the $exists operator
+t.remove();
+t.save( {a:[{1:{foo:4}},{}]} );
+assert.eq( 1, t.count( {'a.1':{$exists:true}} ) );
+if ( 0 ) { // SERVER-2898
+assert.eq( 1, t.count( {'a.1.foo':{$exists:true}} ) );
+}
diff --git a/jstests/array_match3.js b/jstests/array_match3.js
new file mode 100644
index 0000000..c865343
--- /dev/null
+++ b/jstests/array_match3.js
@@ -0,0 +1,13 @@
+// SERVER-2902 Test indexing of numerically referenced array elements.
+
+t = db.jstests_array_match3;
+t.drop();
+
+// Test matching numerically referenced array element.
+t.save( {a:{'0':5}} );
+t.save( {a:[5]} );
+assert.eq( 2, t.count( {'a.0':5} ) );
+
+// Test with index.
+t.ensureIndex( {'a.0':1} );
+assert.eq( 2, t.count( {'a.0':5} ) );
diff --git a/jstests/arrayfind2.js b/jstests/arrayfind2.js
index 94d77f1..1e63bf6 100644
--- a/jstests/arrayfind2.js
+++ b/jstests/arrayfind2.js
@@ -32,4 +32,5 @@ assert.eq( {"a.x":[[3,3]]}, t.find( { a : { $all : [ { $elemMatch : { x : 3 } },
 
 t.ensureIndex( { "a.x":1,"a.y":-1 } );
 
-assert.eq( {"a.x":[[3,3]],"a.y":[[1.7976931348623157e+308,4]]}, t.find( { a : { $all : [ { $elemMatch : { x : 3, y : { $gt: 4 } } } ] } } ).explain().indexBounds );
+// TODO Index bounds below for elemMatch could be improved. - SERVER-3104
+assert.eq( {"a.x":[[3,3]],"a.y":[[{$maxElement:1},{$minElement:1}]]}, t.find( { a : { $all : [ { $elemMatch : { x : 3, y : { $gt: 4 } } } ] } } ).explain().indexBounds );
diff --git a/jstests/arrayfind4.js b/jstests/arrayfind4.js
new file mode 100644
index 0000000..b141425
--- /dev/null
+++ b/jstests/arrayfind4.js
@@ -0,0 +1,22 @@
+// Test query empty array SERVER-2258
+
+t = db.jstests_arrayfind4;
+t.drop();
+
+t.save( {a:[]} );
+t.ensureIndex( {a:1} );
+
+assert.eq( 1, t.find( {a:[]} ).hint( {$natural:1} ).itcount() );
+assert.eq( 1, t.find( {a:[]} ).hint( {a:1} ).itcount() );
+
+assert.eq( 1, t.find( {a:{$in:[[]]}} ).hint( {$natural:1} ).itcount() );
+assert.eq( 1, t.find( {a:{$in:[[]]}} ).hint( {a:1} ).itcount() );
+
+t.remove();
+t.save( {a:[[]]} );
+
+assert.eq( 1, t.find( {a:[]} ).hint( {$natural:1} ).itcount() );
+assert.eq( 1, t.find( {a:[]} ).hint( {a:1} ).itcount() );
+
+assert.eq( 1, t.find( {a:{$in:[[]]}} ).hint( {$natural:1} ).itcount() );
+assert.eq( 1, t.find( {a:{$in:[[]]}} ).hint( {a:1} ).itcount() );
diff --git a/jstests/arrayfind5.js b/jstests/arrayfind5.js
new file mode 100644
index 0000000..083dc06
--- /dev/null
+++ b/jstests/arrayfind5.js
@@ -0,0 +1,23 @@
+// Test indexed elemmatch of missing field.
+
+t = db.jstests_arrayfind5;
+t.drop();
+
+function check( nullElemMatch ) {
+    assert.eq( 1, t.find( {'a.b':1} ).itcount() );
+    assert.eq( 1, t.find( {a:{$elemMatch:{b:1}}} ).itcount() );
+    assert.eq( 0, t.find( {'a.b':null} ).itcount() );
+    assert.eq( nullElemMatch ? 1 : 0, t.find( {a:{$elemMatch:{b:null}}} ).itcount() ); // see SERVER-3377
+}
+
+t.save( {a:[{},{b:1}]} );
+check( true );
+t.ensureIndex( {'a.b':1} );
+check( true );
+
+t.drop();
+
+t.save( {a:[5,{b:1}]} );
+check( false );
+t.ensureIndex( {'a.b':1} );
+check( false );
diff --git a/jstests/auth/auth1.js b/jstests/auth/auth1.js
index 2f2a1b4..c837085 100644
--- a/jstests/auth/auth1.js
+++ b/jstests/auth/auth1.js
@@ -26,6 +26,11 @@ for( i = 0; i < 999; ++i ) {
 assert.eq( 999, t.count() , "A1" );
 assert.eq( 999, t.find().toArray().length , "A2" );
 
+db.setProfilingLevel( 2 );
+t.count();
+db.setProfilingLevel( 0 );
+assert.lt( 0 , db.system.profile.find( { user : "eliot" } ).count() , "AP1" )
+
 assert.eq( 999, db.eval( function() { return db[ "jstests_auth_auth1" ].count(); } ) , "A3" );
 db.eval( function() { db[ "jstests_auth_auth1" ].save( {i:999} ) } );
 assert.eq( 1000, db.eval( function() { return db[ "jstests_auth_auth1" ].count(); } ) , "A4" );
diff --git a/jstests/auth/auth2.js b/jstests/auth/auth2.js
new file mode 100644
index 0000000..4f30894
--- /dev/null
+++ b/jstests/auth/auth2.js
@@ -0,0 +1,23 @@
+// test read/write permissions
+
+port = allocatePorts( 1 )[ 0 ];
+baseName = "jstests_auth_auth2";
+
+m = startMongod( "--auth", "--port", port, "--dbpath", "/data/db/" + baseName, "--nohttpinterface", "--bind_ip", "127.0.0.1" , "--nojournal" , "--smallfiles" );
+db = m.getDB( "admin" );
+
+t = db[ baseName ];
+t.drop();
+
+users = db.getCollection( "system.users" );
+assert.eq( 0 , users.count() );
+
+db.addUser( "eliot" , "eliot" );
+
+assert.throws( function(){ db.users.count(); } )
+
+assert.throws( function() { db.shutdownServer(); } )
+
+db.auth( "eliot" , "eliot" )
+
+db.shutdownServer();
diff --git a/jstests/auth/rename.js b/jstests/auth/rename.js
new file mode 100644
index 0000000..5411298
--- /dev/null
+++ b/jstests/auth/rename.js
@@ -0,0 +1,40 @@
+// test renameCollection with auth
+
+port = allocatePorts( 1 )[ 0 ];
+
+baseName = "jstests_rename_auth";
+m = startMongod( "--auth", "--port", port, "--dbpath", "/data/db/" + baseName, "--nohttpinterface" );
+
+db1 = m.getDB( baseName )
+db2 = m.getDB( baseName + '_other' )
+admin = m.getDB( 'admin' )
+
+// auth not yet checked since we are on localhost
+db1.addUser( "foo", "bar" );
+db2.addUser( "bar", "foo" );
+
+printjson(db1.a.count());
+db1.a.save({});
+assert.eq(db1.a.count(), 1);
+
+//this makes auth required on localhost
+admin.addUser('not', 'used');
+
+// can't run same db w/o auth
+assert.commandFailed( admin.runCommand({renameCollection:db1.a.getFullName(), to: db1.b.getFullName()}) );
+
+// can run same db with auth
+db1.auth('foo', 'bar')
+assert.commandWorked( admin.runCommand({renameCollection:db1.a.getFullName(), to: db1.b.getFullName()}) );
+
+// can't run diff db w/o auth
+assert.commandFailed( admin.runCommand({renameCollection:db1.b.getFullName(), to: db2.a.getFullName()}) );
+
+// can run diff db with auth
+db2.auth('bar', 'foo');
+assert.commandWorked( admin.runCommand({renameCollection:db1.b.getFullName(), to: db2.a.getFullName()}) );
+
+// test post conditions
+assert.eq(db1.a.count(), 0);
+assert.eq(db1.b.count(), 0);
+assert.eq(db2.a.count(), 1);
diff --git a/jstests/auth1.js b/jstests/auth1.js
index ce0159b..a2cc48a 100644
--- a/jstests/auth1.js
+++ b/jstests/auth1.js
@@ -38,3 +38,20 @@ pass = "a" + Math.random();
 db2.addUser( "eliot" , pass );
 
 assert.commandFailed( db2.runCommand( { authenticate: 1, user: "eliot", nonce: "foo", key: "bar" } ) );
+
+// sanity checks for addUser - SERVER-3003
+
+before = db2.system.users.count()
+
+assert.throws( function(){
+    db2.addUser( "" , "abc" )
+} , null , "C1" )
+
+assert.throws( function(){
+    db2.addUser( "abc" , "" )
+} , null , "C2" )
+
+
+after = db2.system.users.count()
+assert( before > 0 , "C3" )
+assert.eq( before , after , "C4" )
diff --git a/jstests/auth2.js b/jstests/auth2.js
index 9b6dfad..9c2b38f 100644
--- a/jstests/auth2.js
+++ b/jstests/auth2.js
@@ -2,4 +2,8 @@
 // SERVER-724
 
 db.runCommand({logout : 1});
-db.runCommand({logout : 1});
+x = db.runCommand({logout : 1});
+assert.eq( 1 , x.ok , "A" )
+
+x = db.logout();
+assert.eq( 1 , x.ok , "B" )
diff --git a/jstests/bench_test1.js b/jstests/bench_test1.js
new file mode 100644
index 0000000..c32b37d
--- /dev/null
+++ b/jstests/bench_test1.js
@@ -0,0 +1,16 @@
+
+t = db.bench_test1;
+t.drop();
+
+t.insert( { _id : 1 , x : 1 } )
+t.insert( { _id : 2 , x : 1 } )
+
+ops = [
+    { op : "findOne" , ns : t.getFullName() , query : { _id : 1 } } ,
+    { op : "update" , ns : t.getFullName() , query : { _id : 1 } , update : { $inc : { x : 1 } } }
+]
+
+seconds = .7
+
+res = benchRun( { ops : ops , parallel : 2 , seconds : seconds , host : db.getMongo().host } )
+assert.lte( seconds * res.update , t.findOne( { _id : 1 } ).x , "A1" )
diff --git a/jstests/bench_test2.js b/jstests/bench_test2.js
new file mode 100644
index 0000000..4a69c9c
--- /dev/null
+++ b/jstests/bench_test2.js
@@ -0,0 +1,41 @@
+
+t = db.bench_test2
+t.drop();
+
+for ( i=0; i<100; i++ )
+    t.insert( { _id : i , x : 0 } );
+db.getLastError();
+
+res = benchRun( { ops : [ { ns : t.getFullName() ,
+                            op : "update" ,
+                            query : { _id : { "#RAND_INT" : [ 0 , 100 ] } } ,
+                            update : { $inc : { x : 1 } } } ] ,
+                  parallel : 2 ,
+                  seconds : 1 ,
+                  totals : true ,
+                  host : db.getMongo().host } )
+printjson( res );
+
+sumsq = 0
+sum = 0
+
+min = 1000
+max = 0;
+t.find().forEach(
+    function(z){
+        sum += z.x;
+        sumsq += Math.pow( ( res.update / 100 ) - z.x , 2 );
+        min = Math.min( z.x , min );
+        max = Math.max( z.x , max );
+    }
+)
+
+avg = sum / 100
+std = Math.sqrt( sumsq / 100 )
+
+print( "Avg: " + avg )
+print( "Std: " + std )
+print( "Min: " + min )
+print( "Max: " + max )
+
+
diff --git a/jstests/big_object1.js b/jstests/big_object1.js
index be841e0..6bbe115 100644
--- a/jstests/big_object1.js
+++ b/jstests/big_object1.js
@@ -44,3 +44,5 @@ if ( db.adminCommand( "buildinfo" ).bits == 64 ){
 else {
     print( "skipping big_object1 b/c not 64-bit" )
 }
+
+print("SUCCESS");
diff --git a/jstests/binData.js b/jstests/binData.js
new file mode 100644
index 0000000..3f03765
--- /dev/null
+++ b/jstests/binData.js
@@ -0,0 +1,14 @@
+
+var x = new BinData(3, "OEJTfmD8twzaj/LPKLIVkA==");
+assert.eq(x.hex(), "3842537e60fcb70cda8ff2cf28b21590", "bad hex");
+assert.eq(x.base64(), "OEJTfmD8twzaj/LPKLIVkA==", "bad base64");
+assert.eq(x.type, 3, "bad type");
+assert.eq(x.length(), 16, "bad length");
+
+x = new BinData(0, "TWFuIGlzIGRpc3Rpbmd1aXNoZWQsIG5vdCBvbmx5IGJ5IGhpcyByZWFzb24sIGJ1dCBieSB0aGlzIHNpbmd1bGFyIHBhc3Npb24gZnJvbSBvdGhlciBhbmltYWxzLCB3aGljaCBpcyBhIGx1c3Qgb2YgdGhlIG1pbmQsIHRoYXQgYnkgYSBwZXJzZXZlcmFuY2Ugb2YgZGVsaWdodCBpbiB0aGUgY29udGludWVkIGFuZCBpbmRlZmF0aWdhYmxlIGdlbmVyYXRpb24gb2Yga25vd2xlZGdlLCBleGNlZWRzIHRoZSBzaG9ydCB2ZWhlbWVuY2Ugb2YgYW55IGNhcm5hbCBwbGVhc3VyZS4=");
+assert.eq(x.hex(), "4d616e2069732064697374696e677569736865642c206e6f74206f6e6c792062792068697320726561736f6e2c2062757420627920746869732073696e67756c61722070617373696f6e2066726f6d206f7468657220616e696d616c732c2077686963682069732061206c757374206f6620746865206d696e642c20746861742062792061207065727365766572616e6365206f662064656c6967687420696e2074686520636f6e74696e75656420616e6420696e6465666174696761626c652067656e65726174696f6e206f66206b6e6f776c656467652c2065786365656473207468652073686f727420766568656d656e6365206f6620616e79206361726e616c20706c6561737572652e", "bad hex");
+assert.eq(x.base64(), "TWFuIGlzIGRpc3Rpbmd1aXNoZWQsIG5vdCBvbmx5IGJ5IGhpcyByZWFzb24sIGJ1dCBieSB0aGlzIHNpbmd1bGFyIHBhc3Npb24gZnJvbSBvdGhlciBhbmltYWxzLCB3aGljaCBpcyBhIGx1c3Qgb2YgdGhlIG1pbmQsIHRoYXQgYnkgYSBwZXJzZXZlcmFuY2Ugb2YgZGVsaWdodCBpbiB0aGUgY29udGludWVkIGFuZCBpbmRlZmF0aWdhYmxlIGdlbmVyYXRpb24gb2Yga25vd2xlZGdlLCBleGNlZWRzIHRoZSBzaG9ydCB2ZWhlbWVuY2Ugb2YgYW55IGNhcm5hbCBwbGVhc3VyZS4=", "bad base64");
+assert.eq(x.type, 0, "bad type");
+assert.eq(x.length(), 269, "bad length");
+
+
diff --git a/jstests/capped.js b/jstests/capped.js
index bae7472..6fdc4df 100644
--- a/jstests/capped.js
+++ b/jstests/capped.js
@@ -1,11 +1,11 @@
 db.jstests_capped.drop();
 db.createCollection("jstests_capped", {capped:true, size:30000});
-assert.eq( 0, db.system.indexes.find( {ns:"test.jstests_capped"} ).count() );
+
+assert.eq( 0, db.system.indexes.find( {ns:"test.jstests_capped"} ).count(), "expected a count of zero indexes for new capped collection" );
 
 t = db.jstests_capped;
 t.save({x:1});
 t.save({x:2});
-assert( t.find().sort({$natural:1})[0].x == 1 );
-assert( t.find().sort({$natural:-1})[0].x == 2 );
-
+assert( t.find().sort({$natural:1})[0].x == 1 , "expected obj.x==1");
+assert( t.find().sort({$natural:-1})[0].x == 2, "expected obj.x == 2");
diff --git a/jstests/capped2.js b/jstests/capped2.js
index 2d2f6a8..65bb82f 100644
--- a/jstests/capped2.js
+++ b/jstests/capped2.js
@@ -8,7 +8,7 @@ function debug( x ) {
 
 var val = new Array( 2000 );
 var c = "";
-for( i = 0; i < 2000; ++i, c += "-" ) {
+for( i = 0; i < 2000; ++i, c += "---" ) { // bigger and bigger objects through the array...
     val[ i ] = { a: c };
 }
 
@@ -47,16 +47,16 @@ function checkDecreasing( i ) {
 
 for( i = 0 ;; ++i ) {
     debug( "capped 2: " + i );
-    tzz.save( val[ i ] );
+    tzz.insert( val[ i ] );
     if ( tzz.count() == 0 ) {
-        assert( i > 100, "K" );
-        break;
+        assert( i > 100, "K" );
+        break;
     }
     checkIncreasing( i );
 }
 
 for( i = 600 ; i >= 0 ; --i ) {
     debug( "capped 2: " + i );
-    tzz.save( val[ i ] );
+    tzz.insert( val[ i ] );
     checkDecreasing( i );
 }
diff --git a/jstests/capped5.js b/jstests/capped5.js
index 1c7ec3d..be6c27d 100644
--- a/jstests/capped5.js
+++ b/jstests/capped5.js
@@ -4,12 +4,11 @@ tn = "capped5"
 t = db[tn]
 t.drop();
+
 db.createCollection( tn , {capped: true, size: 1024 * 1024 * 1 } );
 t.insert( { _id : 5 , x : 11 , z : 52 } );
-
 assert.eq( 0 , t.getIndexKeys().length , "A0" )
 assert.eq( 52 , t.findOne( { x : 11 } ).z , "A1" );
-assert.eq( 52 , t.findOne( { _id : 5 } ).z , "A2" );
 
 t.ensureIndex( { _id : 1 } )
 t.ensureIndex( { x : 1 } )
@@ -41,10 +40,10 @@ t.ensureIndex( { x:1 }, {unique:true, dropDups:true } );
 assert.eq( 1, db.system.indexes.count( {ns:"test."+tn} ) );
 assert.eq( 2, t.find().hint( {x:1} ).toArray().length );
 
-// SERVER-525
+// SERVER-525 (closed) unique indexes in capped collection
 t.drop();
 db.createCollection( tn , {capped: true, size: 1024 * 1024 * 1 } );
-t.ensureIndex( { _id:1 } );
+t.ensureIndex( { _id:1 } ); // note we assume will be automatically unique because it is _id
 t.insert( { _id : 5 , x : 11 } );
 t.insert( { _id : 5 , x : 12 } );
 assert.eq( 1, t.find().toArray().length );
diff --git a/jstests/capped6.js b/jstests/capped6.js
index 6579807..098f667 100644
--- a/jstests/capped6.js
+++ b/jstests/capped6.js
@@ -52,7 +52,7 @@ var max = 0;
  */
 function doTest() {
     for( var i = max; i < oldMax; ++i ) {
-        tzz.save( val[ i ] );
+        tzz.insert( val[ i ] );
     }
     max = oldMax;
     count = tzz.count();
diff --git a/jstests/capped8.js b/jstests/capped8.js
index cce0eec..e5b28dc 100644
--- a/jstests/capped8.js
+++ b/jstests/capped8.js
@@ -9,25 +9,39 @@ function debug( x ) {
 }
 
 /** Generate an object with a string field of specified length */
-function obj( size ) {
-    return {a:new Array( size + 1 ).toString()};;
+function obj( size, x ) {
+    return {X:x, a:new Array( size + 1 ).toString()};
 }
 
 function withinOne( a, b ) {
     assert( Math.abs( a - b ) <= 1, "not within one: " + a + ", " + b )
}
 
+var X = 0;
+
 /**
  * Insert enough documents of the given size spec that the collection will
 * contain only documents having this size spec.
 */
-function insertMany( size ) {
+function insertManyRollingOver( objsize ) {
     // Add some variability, as the precise number can trigger different cases.
-    n = 250 + Random.randInt( 10 );
+    X++;
+    n = 250 + Random.randInt(10);
+
+    assert(t.count() == 0 || t.findOne().X != X);
+
     for( i = 0; i < n; ++i ) {
-        t.save( obj( size ) );
+        t.save( obj( objsize, X ) );
         debug( t.count() );
     }
+
+    if (t.findOne().X != X) {
+        printjson(t.findOne());
+        print("\n\nERROR didn't roll over in insertManyRollingOver " + objsize);
+        print("approx amountwritten: " + (objsize * n));
+        printjson(t.stats());
+        assert(false);
+    }
 }
 
 /**
@@ -37,10 +51,10 @@ function insertAndTruncate( first ) {
     myInitialCount = t.count();
     // Insert enough documents to make the capped allocation loop over.
-    insertMany( 50 );
+    insertManyRollingOver( 150 );
     myFiftyCount = t.count();
     // Insert documents that are too big to fit in the smaller extents.
-    insertMany( 2000 );
+    insertManyRollingOver( 5000 );
     myTwokCount = t.count();
     if ( first ) {
         initialCount = myInitialCount;
@@ -69,18 +83,24 @@ function testTruncate() {
     insertAndTruncate( false );
 }
 
+var pass = 1;
+
+print("pass " + pass++);
 t.drop();
-db._dbCommand( { create:"jstests_capped8", capped: true, $nExtents: [ 10000, 10000, 1000 ] } );
+db._dbCommand( { create:"jstests_capped8", capped: true, $nExtents: [ 10000, 10000, 4000 ] } );
 testTruncate();
 
+print("pass " + pass++);
 t.drop();
-db._dbCommand( { create:"jstests_capped8", capped: true, $nExtents: [ 10000, 1000, 1000 ] } );
+db._dbCommand( { create:"jstests_capped8", capped: true, $nExtents: [ 10000, 1000, 4000 ] } );
 testTruncate();
 
+print("pass " + pass++);
 t.drop();
-db._dbCommand( { create:"jstests_capped8", capped: true, $nExtents: [ 10000, 1000 ] } );
+db._dbCommand( { create:"jstests_capped8", capped: true, $nExtents: [ 10000, 4000 ] } );
 testTruncate();
 
+print("pass " + pass++);
 t.drop();
 db._dbCommand( { create:"jstests_capped8", capped: true, $nExtents: [ 10000 ] } );
 testTruncate();
diff --git a/jstests/capped9.js b/jstests/capped9.js
new file mode 100644
index 0000000..9ea506c
--- /dev/null
+++ b/jstests/capped9.js
@@ -0,0 +1,28 @@
+
+t = db.capped9;
+t.drop();
+
+db.createCollection("capped9" , {capped:true, size:1024*50 });
+
+t.insert( { _id : 1 , x : 2 , y : 3 } )
+
+assert.eq( 1 , t.find( { x : 2 } ).itcount() , "A1" )
+assert.eq( 1 , t.find( { y : 3 } ).itcount() , "A2" )
+//assert.throws( function(){ t.find( { _id : 1 } ).itcount(); } , [] , "A3" ); // SERVER-3064
+
+t.update( { _id : 1 } , { $set : { y : 4 } } )
+//assert( db.getLastError() , "B1" ); // SERVER-3064
+//assert.eq( 3 , t.findOne().y , "B2" ); // SERVER-3064
+
+t.ensureIndex( { _id : 1 } )
+
+assert.eq( 1 , t.find( { _id : 1 } ).itcount() , "D1" )
+
+t.update( { _id : 1 } , { $set : { y : 4 } } )
+assert( null == db.getLastError() , "D1: " + tojson( db.getLastError() ) )
+assert.eq( 4 , t.findOne().y , "D2" )
+
+
+
+
+
diff --git a/jstests/cappeda.js b/jstests/cappeda.js
new file mode 100644
index 0000000..4a4b14a
--- /dev/null
+++ b/jstests/cappeda.js
@@ -0,0 +1,33 @@
+
+t = db.scan_capped_id;
+t.drop()
+
+x = t.runCommand( "create" , { capped : true , size : 10000 } )
+assert( x.ok )
+
+for ( i=0; i<100; i++ )
+    t.insert( { _id : i , x : 1 } )
+
+function q() {
+    return t.findOne( { _id : 5 } )
+}
+
+function u() {
+    t.update( { _id : 5 } , { $set : { x : 2 } } );
+    var gle = db.getLastError();
+    if ( gle )
+        throw gle;
+}
+
+
+// SERVER-3064
+//assert.throws( q , [] , "A1" );
+//assert.throws( u , [] , "B1" );
+
+t.ensureIndex( { _id : 1 } )
+
+assert.eq( 1 , q().x )
+q()
+u()
+
+assert.eq( 2 , q().x )
diff --git a/jstests/compact.js b/jstests/compact.js
new file mode 100644
index 0000000..b12b03f
--- /dev/null
+++ b/jstests/compact.js
@@ -0,0 +1,37 @@
+// compact.js
+
+t = db.compacttest;
+t.drop();
+t.insert({ x: 3 });
+t.insert({ x: 3 });
+t.insert({ x: 5 });
+t.insert({ x: 4, z: 2, k: 'aaa' });
+t.insert({ x: 4, z: 2, k: 'aaa' });
+t.insert({ x: 4, z: 2, k: 'aaa' });
+t.insert({ x: 4, z: 2, k: 'aaa' });
+t.insert({ x: 4, z: 2, k: 'aaa' });
+t.insert({ x: 4, z: 2, k: 'aaa' });
+t.ensureIndex({ x: 1 });
+
+print("1");
+
+var res = db.runCommand({ compact: 'compacttest', dev: true });
+printjson(res);
+assert(res.ok);
+assert(t.count() == 9);
+var v = t.validate(true);
+assert(v.ok);
+assert(v.extentCount == 1);
+assert(v.deletedCount == 1);
+assert(t.getIndexes().length == 2);
+
+print("2");
+
+// works on an empty collection?
+t.remove({});
+assert(db.runCommand({ compact: 'compacttest', dev: true }).ok);
+assert(t.count() == 0);
+v = t.validate(true);
+assert(v.ok);
+assert(v.extentCount == 1);
+assert(t.getIndexes().length == 2);
diff --git a/jstests/compact_speed_test.js b/jstests/compact_speed_test.js
new file mode 100755
index 0000000..0c4b9d5
--- /dev/null
+++ b/jstests/compact_speed_test.js
@@ -0,0 +1,61 @@
+if (1) {
+
+    t = db.compactspeedtest;
+    t.drop();
+
+    var obj = { x: 1, y: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", z: [1, 2] };
+
+    var start = new Date();
+    function timed() {
+        db.getLastError();
+        var dt = (new Date()) - start;
+        //print("time: " + dt);
+        start = new Date();
+        return dt;
+    }
+
+    //print("adding data");
+    var N = 100000;
+    if (db.adminCommand("buildInfo").debug)
+        N = 10000;
+    for (var i = 0; i < N; i++) {
+        obj.x = i;
+        obj.z[1] = i;
+        t.insert(obj);
+    }
+    var a = timed();
+
+    //print("index");
+    t.ensureIndex({ x: 1 });
+    //print("index");
+    t.ensureIndex({ y: 1 });
+    //print("index");
+    t.ensureIndex({ z: 1 });
+
+    a += timed();
+
+    //print("count:" + t.count());
+
+    timed();
+
+    {
+        //print("compact");
+        var res = db.runCommand({ compact: 'compactspeedtest', dev: true });
+        b = timed();
+        //printjson(res);
+        assert(res.ok);
+
+        //print("validate");
+        var v = t.validate(true);
+
+        assert(v.ok);
+        assert(t.getIndexes().length == 4);
+
+        if (b < a) {
+            // consider making this fail/assert
+            print("\n\n\nwarning WARNING compact command was slower than it should be");
+            print("a:" + a + " b:" + b);
+            print("\n\n\n");
+        }
+    }
+}
diff --git a/jstests/date1.js b/jstests/date1.js
index ca2e616..e6fc147 100644
--- a/jstests/date1.js
+++ b/jstests/date1.js
@@ -4,11 +4,14 @@ t = db.date1;
 
 function go( d , msg ){
     t.drop();
-    t.save( { a : 1 , d : d } );
+    t.save({ a: 1, d: d });
+//    printjson(d);
+//    printjson(t.findOne().d);
     assert.eq( d , t.findOne().d , msg )
 }
 
 go( new Date() , "A" )
 go( new Date( 1 ) , "B")
 go( new Date( 0 ) , "C (old spidermonkey lib fails this test)")
+go(new Date(-10), "neg")
diff --git a/jstests/date2.js b/jstests/date2.js
new file mode 100644
index 0000000..94eb58e
--- /dev/null
+++ b/jstests/date2.js
@@ -0,0 +1,13 @@
+// Check that it's possible to compare a Date to a Timestamp - SERVER-3304
+// Check Date / Timestamp comparison equivalence - SERVER-3222
+
+t = db.jstests_date2;
+t.drop();
+
+t.ensureIndex( {a:1} );
+
+t.save( {a:new Timestamp()} );
+
+if ( 0 ) { // SERVER-3304
+assert.eq( 1, t.find( {a:{$gt:new Date(0)}} ).itcount() );
+}
\ No newline at end of file
diff --git a/jstests/date3.js b/jstests/date3.js
new file mode 100644
index 0000000..e7ddf71
--- /dev/null
+++ b/jstests/date3.js
@@ -0,0 +1,31 @@
+// Check dates before Unix epoch - SERVER-405
+
+t = db.date3;
+t.drop()
+
+d1 = new Date(-1000)
+dz = new Date(0)
+d2 = new Date(1000)
+
+t.save( {x: 3, d: dz} )
+t.save( {x: 2, d: d2} )
+t.save( {x: 1, d: d1} )
+
+function test () {
+    var list = t.find( {d: {$lt: dz}} )
+    assert.eq ( 1, list.size() )
+    assert.eq ( 1, list[0].x )
+    assert.eq ( d1, list[0].d )
+    var list = t.find( {d: {$gt: dz}} )
+    assert.eq ( 1, list.size() )
+    assert.eq ( 2, list[0].x )
+    var list = t.find().sort( {d:1} )
+    assert.eq ( 3, list.size() )
+    assert.eq ( 1, list[0].x )
+    assert.eq ( 3, list[1].x )
+    assert.eq ( 2, list[2].x )
+}
+
+test()
+t.ensureIndex( {d: 1} )
+test()
diff --git a/jstests/dbcase.js b/jstests/dbcase.js
index 21854d8..25c0bca 100644
--- a/jstests/dbcase.js
+++ b/jstests/dbcase.js
@@ -1,6 +1,5 @@
+// Check db name duplication constraint SERVER-2111
 
-/*
-TODO SERVER-2111
 a = db.getSisterDB( "dbcasetest_dbnamea" )
 b = db.getSisterDB( "dbcasetest_dbnameA" )
 
@@ -15,11 +14,16 @@ b.foo.save( { x : 1 } )
 z = db.getLastErrorObj();
 assert.eq( 13297 , z.code || 0 , "B : " + tojson(z) )
 
-print( db.getMongo().getDBNames() )
+assert.neq( -1, db.getMongo().getDBNames().indexOf( a.getName() ) );
+assert.eq( -1, db.getMongo().getDBNames().indexOf( b.getName() ) );
+printjson( db.getMongo().getDBs().databases );
 
 a.dropDatabase();
 b.dropDatabase();
 
-print( db.getMongo().getDBNames() )
-*/
-
+ai = db.getMongo().getDBNames().indexOf( a.getName() );
+bi = db.getMongo().getDBNames().indexOf( b.getName() );
+// One of these dbs may exist if there is a slave active, but they must
+// not both exist.
+assert( ai == -1 || bi == -1 );
+printjson( db.getMongo().getDBs().databases );
diff --git a/jstests/dbcase2.js b/jstests/dbcase2.js
new file mode 100644
index 0000000..57e43bc
--- /dev/null
+++ b/jstests/dbcase2.js
@@ -0,0 +1,9 @@
+// SERVER-2111 Check that an in memory db name will block creation of a db with a similar but differently cased name.
+
+a = db.getSisterDB( "dbcasetest_dbnamea" )
+b = db.getSisterDB( "dbcasetest_dbnameA" )
+
+a.c.count();
+assert.throws( function() { b.c.count() } );
+
+assert.eq( -1, db.getMongo().getDBNames().indexOf( "dbcasetest_dbnameA" ) );
diff --git a/jstests/dbhash.js b/jstests/dbhash.js
index e9cbc94..7fea4b4 100644
--- a/jstests/dbhash.js
+++ b/jstests/dbhash.js
@@ -14,16 +14,22 @@ db.getCollectionNames().forEach( function( x ) {
         }
     } );
 
+function dbhash( mydb ) {
+    var ret = mydb.runCommand( "dbhash" );
+    assert.commandWorked( ret, "dbhash failure" );
+    return ret;
+}
+
 function gh( coll , mydb ){
     if ( ! mydb ) mydb = db;
-    var x = mydb.runCommand( "dbhash" ).collections[coll.getName()];
+    var x = dbhash( mydb ).collections[coll.getName()];
     if ( ! x )
         return "";
     return x;
 }
 
 function dbh( mydb ){
-    return mydb.runCommand( "dbhash" ).md5;
+    return dbhash( mydb ).md5;
 }
 
 assert.eq( gh( a ) , gh( b ) , "A1" );
diff --git a/jstests/delx.js b/jstests/delx.js
index 3f8c88c..aa858e9 100644
--- a/jstests/delx.js
+++ b/jstests/delx.js
@@ -23,6 +23,7 @@ x.next();
 y.next();
 
 a.foo.remove( { _id : { $gt : 50 } } );
+db.getLastError();
 
 assert.eq( 51 , a.foo.find().itcount() , "B1" )
 assert.eq( 100 , b.foo.find().itcount() , "B2" )
diff --git a/jstests/disk/directoryperdb.js b/jstests/disk/directoryperdb.js
index 3b65bd0..c29dea0 100644
--- a/jstests/disk/directoryperdb.js
+++ b/jstests/disk/directoryperdb.js
@@ -22,6 +22,8 @@ checkDir = function( dir ) {
 
     files = listFiles( dir + baseName );
     for( f in files ) {
+        if ( files[f].isDirectory )
+            continue;
         assert( new RegExp( baseName + "/" + baseName + "." ).test( files[ f ].name ) , "B dir:" + dir + " f: " + f );
     }
 }
diff --git a/jstests/disk/diskfull.js b/jstests/disk/diskfull.js
index 26b707d..eddb300 100644
--- a/jstests/disk/diskfull.js
+++ b/jstests/disk/diskfull.js
@@ -20,10 +20,16 @@ if ( doIt ) {
     port = allocatePorts( 1 )[ 0 ];
     m = startMongoProgram( "mongod", "--port", port, "--dbpath", dbpath, "--nohttpinterface", "--bind_ip", "127.0.0.1" );
-    c = m.getDB( "diskfulltest" ).getCollection( "diskfulltest" )
+    d = m.getDB( "diskfulltest" );
+    c = d.getCollection( "diskfulltest" );
     c.save( { a: 6 } );
+    assert.eq(d.getLastError(), "new file allocation failure"); // first fail
     assert.soon( function() { return rawMongoProgramOutput().match( /file allocation failure/ ); }, "didn't see 'file allocation failure'" );
     assert.isnull( c.findOne() , "shouldn't exist" );
+    c.save( { a: 6 } );
+    assert.eq(d.getLastError(), "Can't take a write lock while out of disk space"); // every following fail
+
+    sleep( 3000 );
 
     m2 = new Mongo( m.host );
     printjson( m2.getDBs() );
diff --git a/jstests/disk/newcollection.js b/jstests/disk/newcollection.js
index 944ad1c..57ae179 100644
--- a/jstests/disk/newcollection.js
+++ b/jstests/disk/newcollection.js
@@ -3,11 +3,21 @@ port = allocatePorts( 1 )[ 0 ]
 
 var baseName = "jstests_disk_newcollection";
 var m = startMongod( "--noprealloc", "--smallfiles", "--port", port, "--dbpath", "/data/db/" + baseName );
+//var m = db.getMongo();
 db = m.getDB( "test" );
 
-db.createCollection( baseName, {size:15.9*1024*1024} );
-db.baseName.drop();
+var t = db[baseName];
 
-size = m.getDBs().totalSize;
-db.baseName.save( {} );
-assert.eq( size, m.getDBs().totalSize );
+for (var pass = 0; pass <= 1; pass++) {
+
+    db.createCollection(baseName, { size: 15.8 * 1024 * 1024 });
+    if( pass == 0 )
+        t.drop();
+
+    size = m.getDBs().totalSize;
+    t.save({});
+    assert.eq(size, m.getDBs().totalSize);
+    assert(size <= 32 * 1024 * 1024);
+
+    t.drop();
+}
diff --git a/jstests/disk/norepeat.js b/jstests/disk/norepeat.js
index d9f1cd3..985fc36 100644
--- a/jstests/disk/norepeat.js
+++ b/jstests/disk/norepeat.js
@@ -45,7 +45,7 @@ assert.throws( function() { c.next() }, [], "unexpected: object found" );
 m.getDB( "local" ).getCollectionNames().forEach( function( x ) { assert( !x.match( /^temp/ ), "temp collection found" ); } );
 
 t.drop();
-m.getDB( baseName ).createCollection( baseName, { capped:true, size:100000, autoIdIndex:false } );
+m.getDB( baseName ).createCollection( baseName, { capped:true, size:100000, autoIndexId:false } );
 t = m.getDB( baseName ).getCollection( baseName );
 t.insert( {_id:"a"} );
 t.insert( {_id:"a"} );
diff --git a/jstests/disk/quota.js b/jstests/disk/quota.js
new file mode 100644
index 0000000..d93e5ea
--- /dev/null
+++ b/jstests/disk/quota.js
@@ -0,0 +1,47 @@
+// Check functioning of --quotaFiles parameter, including with respect to SERVER-3293 ('local' database).
+
+port = allocatePorts( 1 )[ 0 ];
+
+baseName = "jstests_disk_quota";
+dbpath = "/data/db/" + baseName;
+
+m = startMongod( "--port", port, "--dbpath", "/data/db/" + baseName, "--quotaFiles", "1", "--smallfiles" );
+db = m.getDB( baseName );
+
+big = new Array( 10000 ).toString();
+
+// Insert documents until quota is exhausted.
+while( !db.getLastError() ) {
+    db[ baseName ].save( {b:big} );
+}
+printjson( db.getLastError() );
+
+dotTwoDataFile = dbpath + "/" + baseName + ".2";
+files = listFiles( dbpath );
+for( i in files ) {
+    // Since only one data file is allowed, a .0 file is expected and a .1 file may be preallocated (SERVER-3410) but no .2 file is expected.
+ assert.neq( dotTwoDataFile, files[ i ].name ); +} + +dotTwoDataFile = dbpath + "/" + "local" + ".2"; +// Check that quota does not apply to local db, and a .2 file can be created. +l = m.getDB( "local" )[ baseName ]; +for( i = 0; i < 10000; ++i ) { + l.save( {b:big} ); + assert( !db.getLastError() ); + dotTwoFound = false; + if ( i % 100 != 0 ) { + continue; + } + files = listFiles( dbpath ); + for( f in files ) { + if ( files[ f ].name == dotTwoDataFile ) { + dotTwoFound = true; + } + } + if ( dotTwoFound ) { + break; + } +} + +assert( dotTwoFound ); diff --git a/jstests/disk/quota2.js b/jstests/disk/quota2.js new file mode 100644 index 0000000..c0d30df --- /dev/null +++ b/jstests/disk/quota2.js @@ -0,0 +1,38 @@ +// Test for quotaFiles off by one file limit issue - SERVER-3420. + +if ( 0 ) { // SERVER-3420 + +port = allocatePorts( 1 )[ 0 ]; + +baseName = "jstests_disk_quota2"; +dbpath = "/data/db/" + baseName; + +m = startMongod( "--port", port, "--dbpath", "/data/db/" + baseName, "--quotaFiles", "1", "--smallfiles" ); +db = m.getDB( baseName ); + +big = new Array( 10000 ).toString(); + +// Insert documents until quota is exhausted. +while( !db.getLastError() ) { + db[ baseName ].save( {b:big} ); +} + +db.resetError(); + +// Trigger allocation of an additional file for a 'special' namespace. +for( n = 0; !db.getLastError(); ++n ) { + db.createCollection( '' + n ); +} + +print( n ); + +// Check that new docs are saved in the .0 file. +for( i = 0; i < n; ++i ) { + c = db[ ''+i ]; + c.save( {b:big} ); + if( !db.getLastError() ) { + assert.eq( 0, c.find()._addSpecial( "$showDiskLoc", true )[ 0 ].$diskLoc.file ); + } +} + +} \ No newline at end of file diff --git a/jstests/disk/repair3.js b/jstests/disk/repair3.js index c986dce..9e6767c 100644 --- a/jstests/disk/repair3.js +++ b/jstests/disk/repair3.js @@ -1,4 +1,4 @@ -// test --repairpath on aother partition +// test --repairpath on another partition var baseName = "jstests_disk_repair3"; var repairbase = "/data/db/repairpartitiontest" diff --git a/jstests/disk/repair5.js b/jstests/disk/repair5.js new file mode 100644 index 0000000..65da330 --- /dev/null +++ b/jstests/disk/repair5.js @@ -0,0 +1,43 @@ +// SERVER-2351 Test killop with repair command. + +var baseName = "jstests_disk_repair5"; + +port = allocatePorts( 1 )[ 0 ]; +dbpath = "/data/db/" + baseName + "/"; +repairpath = dbpath + "repairDir/" + +resetDbpath( dbpath ); +resetDbpath( repairpath ); + +m = startMongoProgram( "mongod", "--port", port, "--dbpath", dbpath, "--repairpath", repairpath, "--nohttpinterface", "--bind_ip", "127.0.0.1" ); +db = m.getDB( baseName ); + +big = new Array( 5000 ).toString(); +for( i = 0; i < 20000; ++i ) { + db[ baseName ].save( {i:i,b:big} ); +} + +function killRepair() { + while( 1 ) { + p = db.currentOp().inprog; + for( var i in p ) { + var o = p[ i ]; + printjson( o ); + // Find the active 'repairDatabase' op and kill it. + if ( o.active && o.query.repairDatabase ) { + db.killOp( o.opid ); + return; + } + } + } +} + +s = startParallelShell( killRepair.toString() + "; killRepair();" ); + +// Repair should fail due to killOp. 
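+// (killOp should surface as a clean command failure rather than a server abort, and backupOriginalFiles keeps the pre-repair data files, so the collection can still be counted and validated below.)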
+assert.commandFailed( db.runCommand( {repairDatabase:1, backupOriginalFiles:true} ) ); + +s(); + +assert.eq( 20000, db[ baseName ].find().itcount() ); +assert( db[ baseName ].validate().valid ); diff --git a/jstests/distinct1.js b/jstests/distinct1.js index 5e47400..1b9354f 100644 --- a/jstests/distinct1.js +++ b/jstests/distinct1.js @@ -25,3 +25,4 @@ t.save( { a : { b : "c" } , c : 12 } ); res = t.distinct( "a.b" ); assert.eq( "a,b,c" , res.toString() , "B1" ); +assert.eq( "BasicCursor" , t._distinct( "a.b" ).stats.cursor , "B2" ) diff --git a/jstests/distinct_index1.js b/jstests/distinct_index1.js index 8677457..64dc280 100644 --- a/jstests/distinct_index1.js +++ b/jstests/distinct_index1.js @@ -48,3 +48,13 @@ x = d( "b" , { a : { $gt : 5 } } ); assert.eq( 398 , x.stats.n , "BC1" ) assert.eq( 398 , x.stats.nscanned , "BC2" ) assert.eq( 398 , x.stats.nscannedObjects , "BC3" ) + +// Check proper nscannedObjects count when using a query optimizer cursor. +t.dropIndexes(); +t.ensureIndex( { a : 1, b : 1 } ); +x = d( "b" , { a : { $gt : 5 }, b : { $gt : 5 } } ); +assert.eq( "QueryOptimizerCursor", x.stats.cursor ); +assert.eq( 171 , x.stats.n ) +assert.eq( 275 , x.stats.nscanned ) +// Disable temporarily - exact value doesn't matter. +// assert.eq( 266 , x.stats.nscannedObjects ) diff --git a/jstests/drop2.js b/jstests/drop2.js index a1d619d..87e646e 100644 --- a/jstests/drop2.js +++ b/jstests/drop2.js @@ -26,7 +26,7 @@ function op( drop ) { return null; } -s1 = startParallelShell( "db.jstests_drop2.count( { $where: function() { while( 1 ) { ; } } } )" ); +s1 = startParallelShell( "db.jstests_drop2.count( { $where: function() { while( 1 ) { sleep( 1 ); } } } )" ); countOp = null; assert.soon( function() { countOp = op( false ); return countOp; } ); diff --git a/jstests/drop3.js b/jstests/drop3.js new file mode 100644 index 0000000..b2ca94a --- /dev/null +++ b/jstests/drop3.js @@ -0,0 +1,29 @@ +t = db.jstests_drop3; +sub = t.sub; + +t.drop(); +sub.drop(); + + +for (var i = 0; i < 10; i++){ + t.insert({}); + sub.insert({}); +} + +var cursor = t.find().batchSize(2); +var subcursor = sub.find().batchSize(2); + +cursor.next(); +subcursor.next(); +assert.eq( cursor.objsLeftInBatch(), 1 ); +assert.eq( subcursor.objsLeftInBatch(), 1 ); + +t.drop(); // should invalidate cursor, but not subcursor +db.getLastError(); + +assert.throws( function(){ cursor.itcount() } ); // throws "cursor doesn't exist on server" error on getMore +assert.eq( subcursor.itcount(), 9 ); //one already seen + + + + diff --git a/jstests/dropdb.js b/jstests/dropdb.js new file mode 100644 index 0000000..0b83884 --- /dev/null +++ b/jstests/dropdb.js @@ -0,0 +1,17 @@ +// Test that a db does not exist after it is dropped. +// Disabled in the small oplog suite because the slave may create a master db +// with the same name as the dropped db when requesting a clone. 
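+// After dropDatabase() the name must be absent from getDBNames(), and dropping an already-missing db is a harmless no-op; the two identical assertions below check both.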
+ +m = db.getMongo(); +baseName = "jstests_dropdb"; +ddb = db.getSisterDB( baseName ); + +ddb.c.save( {} ); +ddb.getLastError(); +assert.neq( -1, m.getDBNames().indexOf( baseName ) ); + +ddb.dropDatabase(); +assert.eq( -1, m.getDBNames().indexOf( baseName ) ); + +ddb.dropDatabase(); +assert.eq( -1, m.getDBNames().indexOf( baseName ) ); diff --git a/jstests/dropdb_race.js b/jstests/dropdb_race.js new file mode 100644 index 0000000..bff7980 --- /dev/null +++ b/jstests/dropdb_race.js @@ -0,0 +1,44 @@ +// test dropping a db with simultaneous commits + +m = db.getMongo(); +baseName = "jstests_dur_droprace"; +d = db.getSisterDB(baseName); +t = d.foo; + +assert(d.adminCommand({ setParameter: 1, syncdelay: 5 }).ok); + +var s = 0; + +var start = new Date(); + +for (var pass = 0; pass < 100; pass++) { + if (pass % 2 == 0) { + // sometimes wait for create db first, to vary the timing of things + t.insert({}); + if( pass % 4 == 0 ) + d.runCommand({getLastError:1,j:1}); + else + d.getLastError(); + } + t.insert({ x: 1 }); + t.insert({ x: 3 }); + t.ensureIndex({ x: 1 }); + sleep(s); + if (pass % 37 == 0) + d.adminCommand("closeAllDatabases"); + else if (pass % 13 == 0) + t.drop(); + else if (pass % 17 == 0) + t.dropIndexes(); + else + d.dropDatabase(); + if (pass % 7 == 0) + d.runCommand({getLastError:1,j:1}); + d.getLastError(); + s = (s + 1) % 25; + //print(pass); + if ((new Date()) - start > 60000) { + print("stopping early"); + break; + } +} diff --git a/jstests/dur/closeall.js b/jstests/dur/closeall.js index f169f06..9131aed 100644 --- a/jstests/dur/closeall.js +++ b/jstests/dur/closeall.js @@ -8,7 +8,9 @@ function f() { var ourdb = "closealltest"; print("closeall.js start mongod variant:" + variant); - var options = (new Date()-0)%2==0 ? 8 : 0; + var R = (new Date()-0)%2; + var QuickCommits = (new Date()-0)%3 == 0; + var options = R==0 ? 8 : 0; // 8 is DurParanoid print("closeall.js --durOptions " + options); var N = 1000; if (options) @@ -23,6 +25,10 @@ function f() { // we'll use two connections to make a little parallelism var db1 = conn.getDB(ourdb); var db2 = new Mongo(db1.getMongo().host).getDB(ourdb); + if( QuickCommits ) { + print("closeall.js QuickCommits variant (using a small syncdelay)"); + assert( db2.adminCommand({setParameter:1, syncdelay:5}).ok ); + } print("closeall.js run test"); @@ -34,9 +40,9 @@ function f() { db1.foo.update({ x: 99 }, { a: 1, b: 2, c: 3, d: 4 }); if (i % 100 == 0) db1.foo.find(); - if( i == 800 ) + if( i == 800 ) db1.foo.ensureIndex({ x: 1 }); - var res = null; + var res = null; try { if( variant == 1 ) sleep(0); @@ -44,37 +50,37 @@ function f() { sleep(1); else if( variant == 3 && i % 10 == 0 ) print(i); - res = db2.adminCommand("closeAllDatabases"); - } - catch (e) { - sleep(5000); // sleeping a little makes console output order prettier - print("\n\n\nFAIL closeall.js closeAllDatabases command invocation threw an exception. i:" + i); - try { - print("getlasterror:"); - printjson(db2.getLastErrorObj()); - print("trying one more closealldatabases:"); - res = db2.adminCommand("closeAllDatabases"); - printjson(res); - } - catch (e) { - print("got another exception : " + e); - } - print("\n\n\n"); - // sleep a little to capture possible mongod output? - sleep(2000); - throw e; - } - assert( res.ok, "closeAllDatabases res.ok=false"); - } - - print("closeall.js end test loop. 
slave.foo.count:"); - print(slave.foo.count()); - - print("closeall.js shutting down servers"); - stopMongod(30002); - stopMongod(30001); -} - -f(); + res = db2.adminCommand("closeAllDatabases"); + } + catch (e) { + sleep(5000); // sleeping a little makes console output order prettier + print("\n\n\nFAIL closeall.js closeAllDatabases command invocation threw an exception. i:" + i); + try { + print("getlasterror:"); + printjson(db2.getLastErrorObj()); + print("trying one more closealldatabases:"); + res = db2.adminCommand("closeAllDatabases"); + printjson(res); + } + catch (e) { + print("got another exception : " + e); + } + print("\n\n\n"); + // sleep a little to capture possible mongod output? + sleep(2000); + throw e; + } + assert( res.ok, "closeAllDatabases res.ok=false"); + } + + print("closeall.js end test loop. slave.foo.count:"); + print(slave.foo.count()); + + print("closeall.js shutting down servers"); + stopMongod(30002); + stopMongod(30001); +} + +f(); sleep(500); print("SUCCESS closeall.js"); diff --git a/jstests/dur/data/empty.bson b/jstests/dur/data/empty.bson new file mode 100644 index 0000000..e69de29 diff --git a/jstests/dur/diskfull.js b/jstests/dur/diskfull.js index da45c20..c123ea1 100644 --- a/jstests/dur/diskfull.js +++ b/jstests/dur/diskfull.js @@ -14,23 +14,23 @@ for ( i in files ) { if ( !doIt ) { print( "path " + startPath + " missing, skipping diskfull test" ); doIt = false; -} - -function checkNoJournalFiles(path, pass) { - var files = listFiles(path); - if (files.some(function (f) { return f.name.indexOf("prealloc") < 0; })) { - if (pass == null) { - // wait a bit longer for mongod to potentially finish if it is still running. - sleep(10000); - return checkNoJournalFiles(path, 1); - } - print("\n\n\n"); - print("FAIL path:" + path); - print("unexpected files:"); - printjson(files); - assert(false, "FAIL a journal/lsn file is present which is unexpected"); - } -} +} + +function checkNoJournalFiles(path, pass) { + var files = listFiles(path); + if (files.some(function (f) { return f.name.indexOf("prealloc") < 0; })) { + if (pass == null) { + // wait a bit longer for mongod to potentially finish if it is still running. + sleep(10000); + return checkNoJournalFiles(path, 1); + } + print("\n\n\n"); + print("FAIL path:" + path); + print("unexpected files:"); + printjson(files); + assert(false, "FAIL a journal/lsn file is present which is unexpected"); + } +} /** Clear dbpath without removing and recreating diskfulltest directory, as resetDbpath does */ function clear() { @@ -56,7 +56,9 @@ function work() { d.foo.insert( { _id:i, b:big } ); } - d.getLastError(); + gle = d.getLastError(); + if ( gle ) + throw gle; } catch ( e ) { print( e ); raise( e ); @@ -86,9 +88,8 @@ function runFirstMongodAndFillDisk() { conn = startMongodNoReset("--port", 30001, "--dbpath", startPath, "--dur", "--smallfiles", "--durOptions", 8, "--noprealloc"); assert.throws( work, null, "no exception thrown when exceeding disk capacity" ); - waitMongoProgramOnPort( 30001 ); - - // the above wait doesn't work on windows + stopMongod( 30001 ); + sleep(5000); } @@ -104,9 +105,9 @@ function runSecondMongdAndRecover() { // stopMongod seems to be asynchronous (hmmm) so we sleep here. 
sleep(5000); - // at this point, after clean shutdown, there should be no journal files - log("check no journal files"); - checkNoJournalFiles(startPath + "/journal/"); + // at this point, after clean shutdown, there should be no journal files + log("check no journal files"); + checkNoJournalFiles(startPath + "/journal/"); log(); } @@ -133,4 +134,4 @@ if ( doIt ) { print(testname + " SUCCESS"); -} \ No newline at end of file +} diff --git a/jstests/dur/dropdb.js b/jstests/dur/dropdb.js index 7f82cd7..2aabd4a 100644 --- a/jstests/dur/dropdb.js +++ b/jstests/dur/dropdb.js @@ -73,21 +73,28 @@ function verify() { var d = conn.getDB("test"); var count = d.foo.count(); if (count != 1) { - print("going to fail, count mismatch in verify()"); + print("going to fail, test.foo.count() != 1 in verify()"); sleep(10000); // easier to read the output this way print("\n\n\ndropdb.js FAIL test.foo.count() should be 1 but is : " + count); - print(d.foo.count() + "\n\n\n"); + print(d.foo.count() + "\n\n\n"); assert(false); } assert(d.foo.findOne()._id == 100, "100"); print("dropdb.js teste.foo.findOne:"); - printjson(conn.getDB("teste").foo.findOne()); - - var teste = conn.getDB("teste"); - print("dropdb count " + teste.foo.count()); + printjson(conn.getDB("teste").foo.findOne()); + + var teste = conn.getDB("teste"); + var testecount = teste.foo.count(); + if (testecount != 1) { + print("going to fail, teste.foo.count() != 1 in verify()"); + sleep(10000); // easier to read the output this way + print("\n\n\ndropdb.js FAIL teste.foo.count() should be 1 but is : " + testecount); + print("\n\n\n"); + assert(false); + } + print("teste.foo.count() = " + teste.foo.count()); assert(teste.foo.findOne()._id == 99, "teste"); - } if (debugging) { diff --git a/jstests/dur/dur1.js b/jstests/dur/dur1.js index 4c8f1bf..299ac30 100755 --- a/jstests/dur/dur1.js +++ b/jstests/dur/dur1.js @@ -75,7 +75,7 @@ function work() { } function verify() { - log("verify"); + log("verify test.foo.count == 2"); var d = conn.getDB("test"); var ct = d.foo.count(); if (ct != 2) { @@ -99,37 +99,38 @@ var path1 = "/data/db/" + testname+"nodur"; var path2 = "/data/db/" + testname+"dur"; // non-durable version -log(); +log("run mongod without journaling"); conn = startMongodEmpty("--port", 30000, "--dbpath", path1, "--nodur", "--smallfiles"); work(); stopMongod(30000); // durable version -log(); -conn = startMongodEmpty("--port", 30001, "--dbpath", path2, "--dur", "--smallfiles", "--durOptions", 8); +log("run mongod with --journal"); +conn = startMongodEmpty("--port", 30001, "--dbpath", path2, "--journal", "--smallfiles", "--journalOptions", 8); work(); // wait for group commit. -printjson(conn.getDB('admin').runCommand({getlasterror:1, fsync:1})); - -// kill the process hard +printjson(conn.getDB('admin').runCommand({getlasterror:1, fsync:1})); + +// kill the process hard +log("kill 9"); stopMongod(30001, /*signal*/9); // journal file should be present, and non-empty as we killed hard // restart and recover -log(); -conn = startMongodNoReset("--port", 30002, "--dbpath", path2, "--dur", "--smallfiles", "--durOptions", 8); +log("restart mongod --journal and recover"); +conn = startMongodNoReset("--port", 30002, "--dbpath", path2, "--journal", "--smallfiles", "--journalOptions", 8); verify(); -log("stop"); +log("stop mongod"); stopMongod(30002); // stopMongod seems to be asynchronous (hmmm) so we sleep here. 
-sleep(5000); +// sleep(5000); // at this point, after clean shutdown, there should be no journal files -log("check no journal files"); +log("check no journal files (after presumably clean shutdown)"); checkNoJournalFiles(path2 + "/journal"); log("check data matches ns"); diff --git a/jstests/dur/dur1_tool.js b/jstests/dur/dur1_tool.js new file mode 100755 index 0000000..5090b5b --- /dev/null +++ b/jstests/dur/dur1_tool.js @@ -0,0 +1,152 @@ +/* + test durability option with tools (same as dur1.js, but uses mongorestore to do the repair) +*/ + +var debugging = false; +var testname = "dur1_tool"; +var step = 1; +var conn = null; + +function checkNoJournalFiles(path, pass) { + var files = listFiles(path); + if (files.some(function (f) { return f.name.indexOf("prealloc") < 0; })) { + if (pass == null) { + // wait a bit longer for mongod to potentially finish if it is still running. + sleep(10000); + return checkNoJournalFiles(path, 1); + } + print("\n\n\n"); + print("FAIL path:" + path); + print("unexpected files:"); + printjson(files); + assert(false, "FAIL a journal/lsn file is present which is unexpected"); + } +} + +function runDiff(a, b) { + function reSlash(s) { + var x = s; + if (_isWindows()) { + while (1) { + var y = x.replace('/', '\\'); + if (y == x) + break; + x = y; + } + } + return x; + } + a = reSlash(a); + b = reSlash(b); + print("diff " + a + " " + b); + return run("diff", a, b); } + +function log(str) { + print(); + if(str) + print(testname+" step " + step++ + " " + str); + else + print(testname+" step " + step++); +} + +// if you do inserts here, you will want to set _id. otherwise they won't match on different +// runs so we can't do a binary diff of the resulting files to check they are consistent. +function work() { + log("work"); + var d = conn.getDB("test"); + d.foo.insert({ _id: 3, x: 22 }); + d.foo.insert({ _id: 4, x: 22 }); + d.a.insert({ _id: 3, x: 22, y: [1, 2, 3] }); + d.a.insert({ _id: 4, x: 22, y: [1, 2, 3] }); + d.a.update({ _id: 4 }, { $inc: { x: 1} }); + + // try building an index. however, be careful as object id's in system.indexes would vary, so we do it manually: + d.system.indexes.insert({ _id: 99, ns: "test.a", key: { x: 1 }, name: "x_1", v: 0 }); + +// d.a.update({ _id: 4 }, { $inc: { x: 1} }); +// d.a.reIndex(); + + // assure writes applied in case we kill -9 on return from this function + d.getLastError(); + + log("endwork"); + return d; +} + +function verify() { + log("verify test.foo.count == 2"); + var d = conn.getDB("test"); + var ct = d.foo.count(); + if (ct != 2) { + print("\n\n\nFAIL dur1_tool.js count is wrong in verify(): " + ct + "\n\n\n"); + assert(ct == 2); + } +} + +if( debugging ) { + // mongod already running in debugger + conn = db.getMongo(); + work(); + sleep(30000); + quit(); +} + +log(); + +// directories +var path1 = "/data/db/" + testname+"nodur"; +var path2 = "/data/db/" + testname+"dur"; + +// non-durable version +log("run mongod without journaling"); +conn = startMongodEmpty("--port", 30000, "--dbpath", path1, "--nodur", "--smallfiles"); +work(); +stopMongod(30000); + +// durable version +log("run mongod with --journal"); +conn = startMongodEmpty("--port", 30001, "--dbpath", path2, "--journal", "--smallfiles", "--journalOptions", 8); +work(); + +// wait for group commit.
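+// (With --journal enabled, getlasterror with fsync:1 waits for the journal commit, making the writes from work() durable before the kill -9 below, so recovery has something to replay.)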
+printjson(conn.getDB('admin').runCommand({getlasterror:1, fsync:1})); + +// kill the process hard +log("kill 9"); +stopMongod(30001, /*signal*/9); + +// journal file should be present, and non-empty as we killed hard + +// mongorestore with --dbpath and --journal options should do a recovery pass +// empty.bson is an empty file so it won't actually insert anything +log("use mongorestore to recover"); +runMongoProgram("mongorestore", "--dbpath", path2, "--journal", "-d", "test", "-c", "empty", "jstests/dur/data/empty.bson"); + +// stopMongod seems to be asynchronous (hmmm) so we sleep here. +// sleep(5000); + +// at this point, after clean shutdown, there should be no journal files +log("check no journal files (after presumably clean shutdown)"); +checkNoJournalFiles(path2 + "/journal"); + +log("check data matches ns"); +var diff = runDiff(path1 + "/test.ns", path2 + "/test.ns"); +if (diff != "") { + print("\n\n\nDIFFERS\n"); + print(diff); +} +assert(diff == "", "error test.ns files differ"); + +log("check data matches .0"); +var diff = runDiff(path1 + "/test.0", path2 + "/test.0"); +if (diff != "") { + print("\n\n\nDIFFERS\n"); + print(diff); +} +assert(diff == "", "error test.0 files differ"); + +log("check data matches done"); + +print(testname + " SUCCESS"); + diff --git a/jstests/dur/indexbg.js b/jstests/dur/indexbg.js new file mode 100644 index 0000000..e78ae4a --- /dev/null +++ b/jstests/dur/indexbg.js @@ -0,0 +1,7 @@ +path = '/data/db/indexbg_dur'; + +m = startMongodEmpty( '--port', 30001, '--dbpath', path, '--journal', '--smallfiles', '--journalOptions', 24 ); +t = m.getDB( 'test' ).test; +t.save( {x:1} ); +t.createIndex( {x:1}, {background:true} ); +t.count(); diff --git a/jstests/dur/indexbg2.js b/jstests/dur/indexbg2.js new file mode 100644 index 0000000..6a0af24 --- /dev/null +++ b/jstests/dur/indexbg2.js @@ -0,0 +1,19 @@ +path = '/data/db/indexbg2_dur'; + +m = startMongodEmpty( '--port', 30001, '--dbpath', path, '--journal', '--smallfiles' ); + +t = m.getDB( 'test' ).test; +t.createIndex( {a:1} ); +t.createIndex( {b:1} ); +t.createIndex( {x:1}, {background:true} ); +for( var i = 0; i < 1000; ++i ) { + t.insert( {_id:i,a:'abcd',b:'bcde',x:'four score and seven years ago'} ); + t.remove( {_id:i} ); +} +sleep( 1000 ); +for( var i = 1000; i < 2000; ++i ) { + t.insert( {_id:i,a:'abcd',b:'bcde',x:'four score and seven years ago'} ); + t.remove( {_id:i} ); +} +t.insert( {_id:2000,a:'abcd',b:'bcde',x:'four score and seven years ago'} ); +assert( !t.getDB().getLastError() ); diff --git a/jstests/dur/manyRestart.js b/jstests/dur/manyRestart.js index 04e4318..79f72a4 100755 --- a/jstests/dur/manyRestart.js +++ b/jstests/dur/manyRestart.js @@ -116,6 +116,10 @@ conn = startMongodEmpty("--port", 30000, "--dbpath", path1, "--nodur", "--smallf work(); stopMongod(30000); +// hail mary for windows +// Sat Jun 11 14:07:57 Error: boost::filesystem::create_directory: Access is denied: "\data\db\manyRestartsdur" (anon):1 +sleep(1000); + log("starting 30001"); conn = startMongodEmpty("--port", 30001, "--dbpath", path2, "--dur", "--smallfiles", "--durOptions", 8); work(); diff --git a/jstests/eval_nolock.js b/jstests/eval_nolock.js index 2688ec5..2ab96a3 100644 --- a/jstests/eval_nolock.js +++ b/jstests/eval_nolock.js @@ -10,7 +10,7 @@ res = db.runCommand( { eval : db.eval_nolock.insert( { _id : 123 } ); return db.eval_nolock.count(); } - , nlock : true } ); + , nolock : true } ); assert.eq( 11 , res.retval , "A" ) diff --git a/jstests/evalb.js b/jstests/evalb.js index 177930c..ea80331 100644 --- 
a/jstests/evalb.js +++ b/jstests/evalb.js @@ -11,7 +11,7 @@ db.setProfilingLevel( 2 ); assert.eq( 3, db.eval( function(){ return db.evalb.findOne().x; } ) , "B" ); o = db.system.profile.find().sort( { $natural : -1 } ).limit(1).next(); -assert( o.info.indexOf( "findOne().x" ) > 0 , "C : " + tojson( o ) ) +assert( tojson(o).indexOf( "findOne().x" ) > 0 , "C : " + tojson( o ) ) db.setProfilingLevel( 0 ); diff --git a/jstests/evalc.js b/jstests/evalc.js index 8a9e889..0320ecd 100644 --- a/jstests/evalc.js +++ b/jstests/evalc.js @@ -1,17 +1,24 @@ t = db.jstests_evalc; t.drop(); +t2 = db.evalc_done +t2.drop() + for( i = 0; i < 10; ++i ) { t.save( {i:i} ); } // SERVER-1610 -s = startParallelShell( "print( 'starting forked:' + Date() ); for ( i=0; i<500000; i++ ){ db.currentOp(); } print( 'ending forked:' + Date() ); " ) +assert.eq( 0 , t2.count() , "X1" ) + +s = startParallelShell( "print( 'starting forked:' + Date() ); for ( i=0; i<50000; i++ ){ db.currentOp(); } print( 'ending forked:' + Date() ); db.evalc_done.insert( { x : 1 } ); " ) print( "starting eval: " + Date() ) -for ( i=0; i<20000; i++ ){ +while ( true ) { db.eval( "db.jstests_evalc.count( {i:10} );" ); + if ( t2.count() > 0 ) + break; } print( "end eval: " + Date() ) diff --git a/jstests/evald.js b/jstests/evald.js index 78cabb6..7b18f3c 100644 --- a/jstests/evald.js +++ b/jstests/evald.js @@ -53,10 +53,10 @@ function doIt( ev, wait, where ) { } -doIt( "db.jstests_evald.count( { $where: function() { while( 1 ) { ; } } } )", true, true ); -doIt( "db.jstests_evald.count( { $where: function() { while( 1 ) { ; } } } )", false, true ); -doIt( "while( true ) {;}", false ); -doIt( "while( true ) {;}", true ); +doIt( "db.jstests_evald.count( { $where: function() { while( 1 ) { sleep(1); } } } )", true, true ); +doIt( "db.jstests_evald.count( { $where: function() { while( 1 ) { sleep(1); } } } )", false, true ); +doIt( "while( true ) { sleep(1);}", false ); +doIt( "while( true ) { sleep(1);}", true ); // the for loops are currently required, as a spawned op masks the parent op - see SERVER-1931 doIt( "while( 1 ) { for( var i = 0; i < 10000; ++i ) {;} db.jstests_evald.count( {i:10} ); }", true ); @@ -65,4 +65,4 @@ doIt( "while( 1 ) { for( var i = 0; i < 10000; ++i ) {;} db.jstests_evald.count( doIt( "while( 1 ) { for( var i = 0; i < 10000; ++i ) {;} db.jstests_evald.count(); }", false ); doIt( "while( 1 ) { for( var i = 0; i < 10000; ++i ) {;} try { db.jstests_evald.count( {i:10} ); } catch ( e ) { } }", true ); -doIt( "while( 1 ) { try { while( 1 ) { ; } } catch ( e ) { } }", true ); +doIt( "while( 1 ) { try { while( 1 ) { sleep(1); } } catch ( e ) { } }", true ); diff --git a/jstests/exists3.js b/jstests/exists3.js new file mode 100644 index 0000000..53a69d6 --- /dev/null +++ b/jstests/exists3.js @@ -0,0 +1,21 @@ +// Check exists with non empty document, based on SERVER-2470 example. 
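+// Rough sketch of the semantics under test: for {a: 1, b: 2} the field c is missing, so {c: {$exists: false}} matches and {c: {$exists: true}} does not, with or without an index on the sort field c.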
+ +t = db.jstests_exists3; +t.drop(); + +t.insert({a: 1, b: 2}); + +assert.eq( 1, t.find({}).sort({c: -1}).itcount() ); +assert.eq( 1, t.count({c: {$exists: false}}) ); +assert.eq( 1, t.find({c: {$exists: false}}).itcount() ); +assert.eq( 1, t.find({c: {$exists: false}}).sort({c: -1}).itcount() ); + +// now we have an index on the sort key +t.ensureIndex({c: -1}) + +assert.eq( 1, t.find({c: {$exists: false}}).sort({c: -1}).itcount() ); +assert.eq( 1, t.find({c: {$exists: false}}).itcount() ); +// still ok without the $exists +assert.eq( 1, t.find({}).sort({c: -1}).itcount() ); +// and ok with a convoluted $not $exists +assert.eq( 1, t.find({c: {$not: {$exists: true}}}).sort({c: -1}).itcount() ); diff --git a/jstests/exists4.js b/jstests/exists4.js new file mode 100644 index 0000000..fb801ed --- /dev/null +++ b/jstests/exists4.js @@ -0,0 +1,20 @@ +// Check various exists cases, based on SERVER-1735 example. + +t = db.jstests_exists4; +t.drop(); + +t.ensureIndex({date: -1, country_code: 1, user_id: 1}, {unique: 1, background: 1}); +t.insert({ date: new Date("08/27/2010"), tot_visit: 100}); +t.insert({ date: new Date("08/27/2010"), country_code: "IT", tot_visit: 77}); +t.insert({ date: new Date("08/27/2010"), country_code: "ES", tot_visit: 23}); +t.insert({ date: new Date("08/27/2010"), country_code: "ES", user_id: "and...@spacca.org", tot_visit: 11}); +t.insert({ date: new Date("08/27/2010"), country_code: "ES", user_id: "andrea.spa...@gmail.com", tot_visit: 5}); +t.insert({ date: new Date("08/27/2010"), country_code: "ES", user_id: "andrea.spa...@progloedizioni.com", tot_visit: 7}); + +assert.eq( 6, t.find({date: new Date("08/27/2010")}).count() ); +assert.eq( 5, t.find({date: new Date("08/27/2010"), country_code: {$exists: true}}).count() ); +assert.eq( 1, t.find({date: new Date("08/27/2010"), country_code: {$exists: false}}).count() ); +assert.eq( 1, t.find({date: new Date("08/27/2010"), country_code: null}).count() ); +assert.eq( 3, t.find({date: new Date("08/27/2010"), country_code: {$exists: true}, user_id: {$exists: true}}).count() ); +assert.eq( 2, t.find({date: new Date("08/27/2010"), country_code: {$exists: true}, user_id: {$exists: false}}).count() ); +assert.eq( 2, t.find({date: new Date("08/27/2010"), country_code: {$exists: true}, user_id: null}).count() ); diff --git a/jstests/exists5.js b/jstests/exists5.js new file mode 100644 index 0000000..a90a94f --- /dev/null +++ b/jstests/exists5.js @@ -0,0 +1,33 @@ +// Test some $not/$exists cases. 
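+// {$not: {$exists: true}} should match exactly what {$exists: false} matches (and vice versa); the cases below exercise this for a missing 'a.b', a nested object, an array of scalars, and an array of objects.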
+ +t = db.jstests_exists5; +t.drop(); + +t.save( {a:1} ); +assert.eq( 1, t.count( {'a.b':{$exists:false}} ) ); +assert.eq( 1, t.count( {'a.b':{$not:{$exists:true}}} ) ); +assert.eq( 1, t.count( {'c.d':{$not:{$exists:true}}} ) ); +assert.eq( 0, t.count( {'a.b':{$exists:true}} ) ); +assert.eq( 0, t.count( {'a.b':{$not:{$exists:false}}} ) ); +assert.eq( 0, t.count( {'c.d':{$not:{$exists:false}}} ) ); + +t.drop(); +t.save( {a:{b:1}} ); +assert.eq( 1, t.count( {'a.b':{$exists:true}} ) ); +assert.eq( 1, t.count( {'a.b':{$not:{$exists:false}}} ) ); +assert.eq( 0, t.count( {'a.b':{$exists:false}} ) ); +assert.eq( 0, t.count( {'a.b':{$not:{$exists:true}}} ) ); + +t.drop(); +t.save( {a:[1]} ); +assert.eq( 1, t.count( {'a.b':{$exists:false}} ) ); +assert.eq( 1, t.count( {'a.b':{$not:{$exists:true}}} ) ); +assert.eq( 0, t.count( {'a.b':{$exists:true}} ) ); +assert.eq( 0, t.count( {'a.b':{$not:{$exists:false}}} ) ); + +t.drop(); +t.save( {a:[{b:1}]} ); +assert.eq( 1, t.count( {'a.b':{$exists:true}} ) ); +assert.eq( 1, t.count( {'a.b':{$not:{$exists:false}}} ) ); +assert.eq( 0, t.count( {'a.b':{$exists:false}} ) ); +assert.eq( 0, t.count( {'a.b':{$not:{$exists:true}}} ) ); diff --git a/jstests/exists6.js b/jstests/exists6.js new file mode 100644 index 0000000..7c1cdc1 --- /dev/null +++ b/jstests/exists6.js @@ -0,0 +1,63 @@ +// SERVER-393 Test indexed matching with $exists. + +t = db.jstests_exists6; +t.drop(); + +t.ensureIndex( {b:1} ); +t.save( {} ); +t.save( {b:1} ); +t.save( {b:null} ); + +checkExists = function( query ) { + // Constraint on 'b' is trivial, so a BasicCursor is the default cursor type. + assert.eq( 'BasicCursor', t.find( query ).explain().cursor ); + // Index bounds include all elements. + assert.eq( [ [ { $minElement:1 }, { $maxElement:1 } ] ], t.find( query ).hint( {b:1} ).explain().indexBounds.b ); + // All keys must be scanned. + assert.eq( 3, t.find( query ).hint( {b:1} ).explain().nscanned ); + // 2 docs will match. + assert.eq( 2, t.find( query ).hint( {b:1} ).itcount() ); +} +checkExists( {b:{$exists:true}} ); +checkExists( {b:{$not:{$exists:false}}} ); + +checkMissing = function( query ) { + // Constraint on 'b' is nontrivial, so a BtreeCursor is the default cursor type. + assert.eq( 'BtreeCursor b_1', t.find( query ).explain().cursor ); + // Scan null index keys. + assert.eq( [ [ null, null ] ], t.find( query ).explain().indexBounds.b ); + // Two existing null keys will be scanned. + assert.eq( 2, t.find( query ).explain().nscanned ); + // One doc is missing 'b'. + assert.eq( 1, t.find( query ).hint( {b:1} ).itcount() ); +} +checkMissing( {b:{$exists:false}} ); +checkMissing( {b:{$not:{$exists:true}}} ); + +// Now check existence of second compound field. +t.ensureIndex( {a:1,b:1} ); +t.save( {a:1} ); +t.save( {a:1,b:1} ); +t.save( {a:1,b:null} ); + +checkExists = function( query ) { + // Index bounds include all elements. + assert.eq( [ [ { $minElement:1 }, { $maxElement:1 } ] ], t.find( query ).explain().indexBounds.b ); + // All keys must be scanned. + assert.eq( 3, t.find( query ).explain().nscanned ); + // 2 docs will match. + assert.eq( 2, t.find( query ).hint( {a:1,b:1} ).itcount() ); +} +checkExists( {a:1,b:{$exists:true}} ); +checkExists( {a:1,b:{$not:{$exists:false}}} ); + +checkMissing = function( query ) { + // Scan null index keys. + assert.eq( [ [ null, null ] ], t.find( query ).explain().indexBounds.b ); + // Two existing null keys will be scanned. + assert.eq( 2, t.find( query ).explain().nscanned ); + // One doc is missing 'b'. 
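+ // (A missing 'b' and an explicit b:null produce the same null index key, which is why nscanned is 2 above while only the document truly lacking 'b' satisfies $exists:false.)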
+ assert.eq( 1, t.find( query ).hint( {a:1,b:1} ).itcount() ); +} +checkMissing( {a:1,b:{$exists:false}} ); +checkMissing( {a:1,b:{$not:{$exists:true}}} ); diff --git a/jstests/exists7.js b/jstests/exists7.js new file mode 100644 index 0000000..14a9720 --- /dev/null +++ b/jstests/exists7.js @@ -0,0 +1,21 @@ + +// Test that non-boolean value types are allowed with the $exists spec. SERVER-2322 + +t = db.jstests_exists7; +t.drop(); + +function testIntegerExistsSpec() { + t.remove(); + t.save( {} ); + t.save( {a:1} ); + t.save( {a:2} ); + t.save( {a:3, b:3} ); + t.save( {a:4, b:4} ); + + assert.eq( 2, t.count( {b:{$exists:1}} ) ); + assert.eq( 3, t.count( {b:{$exists:0}} ) ); +} + +testIntegerExistsSpec(); +t.ensureIndex( {b:1} ); +testIntegerExistsSpec(); diff --git a/jstests/exists8.js b/jstests/exists8.js new file mode 100644 index 0000000..82f0c45 --- /dev/null +++ b/jstests/exists8.js @@ -0,0 +1,76 @@ +// Test $exists with array element field names SERVER-2897 + +t = db.jstests_exists8; +t.drop(); + +t.save( {a:[1]} ); +assert.eq( 1, t.count( {'a.0':{$exists:true}} ) ); +assert.eq( 1, t.count( {'a.1':{$exists:false}} ) ); +assert.eq( 0, t.count( {'a.0':{$exists:false}} ) ); +assert.eq( 0, t.count( {'a.1':{$exists:true}} ) ); + +t.remove(); +t.save( {a:[1,2]} ); +assert.eq( 1, t.count( {'a.0':{$exists:true}} ) ); +assert.eq( 0, t.count( {'a.1':{$exists:false}} ) ); +assert.eq( 0, t.count( {'a.0':{$exists:false}} ) ); +assert.eq( 1, t.count( {'a.1':{$exists:true}} ) ); + +t.remove(); +t.save( {a:[{}]} ); +assert.eq( 1, t.count( {'a.0':{$exists:true}} ) ); +assert.eq( 1, t.count( {'a.1':{$exists:false}} ) ); +assert.eq( 0, t.count( {'a.0':{$exists:false}} ) ); +assert.eq( 0, t.count( {'a.1':{$exists:true}} ) ); + +t.remove(); +t.save( {a:[{},{}]} ); +assert.eq( 1, t.count( {'a.0':{$exists:true}} ) ); +assert.eq( 0, t.count( {'a.1':{$exists:false}} ) ); +assert.eq( 0, t.count( {'a.0':{$exists:false}} ) ); +assert.eq( 1, t.count( {'a.1':{$exists:true}} ) ); + +t.remove(); +t.save( {a:[{'b':2},{'a':1}]} ); +assert.eq( 1, t.count( {'a.a':{$exists:true}} ) ); +assert.eq( 1, t.count( {'a.1.a':{$exists:true}} ) ); +assert.eq( 1, t.count( {'a.0.a':{$exists:false}} ) ); + +t.remove(); +t.save( {a:[[1]]} ); +assert.eq( 1, t.count( {'a.0':{$exists:true}} ) ); +assert.eq( 1, t.count( {'a.0.0':{$exists:true}} ) ); +assert.eq( 0, t.count( {'a.0.0':{$exists:false}} ) ); +assert.eq( 0, t.count( {'a.0.0.0':{$exists:true}} ) ); +assert.eq( 1, t.count( {'a.0.0.0':{$exists:false}} ) ); + +t.remove(); +t.save( {a:[[[1]]]} ); +assert.eq( 1, t.count( {'a.0.0.0':{$exists:true}} ) ); + +t.remove(); +t.save( {a:[[{b:1}]]} ); +assert.eq( 0, t.count( {'a.b':{$exists:true}} ) ); +assert.eq( 1, t.count( {'a.b':{$exists:false}} ) ); +assert.eq( 1, t.count( {'a.0.b':{$exists:true}} ) ); +assert.eq( 0, t.count( {'a.0.b':{$exists:false}} ) ); + +t.remove(); +t.save( {a:[[],[{b:1}]]} ); +assert.eq( 0, t.count( {'a.0.b':{$exists:true}} ) ); +assert.eq( 1, t.count( {'a.0.b':{$exists:false}} ) ); + +t.remove(); +t.save( {a:[[],[{b:1}]]} ); +assert.eq( 1, t.count( {'a.1.b':{$exists:true}} ) ); +assert.eq( 0, t.count( {'a.1.b':{$exists:false}} ) ); + +t.remove(); +t.save( {a:[[],[{b:1}]]} ); +assert.eq( 1, t.count( {'a.1.0.b':{$exists:true}} ) ); +assert.eq( 0, t.count( {'a.1.0.b':{$exists:false}} ) ); + +t.remove(); +t.save( {a:[[],[{b:1}]]} ); +assert.eq( 0, t.count( {'a.1.1.b':{$exists:true}} ) ); +assert.eq( 1, t.count( {'a.1.1.b':{$exists:false}} ) ); diff --git a/jstests/exists9.js b/jstests/exists9.js new file mode 100644
index 0000000..66378d1 --- /dev/null +++ b/jstests/exists9.js @@ -0,0 +1,41 @@ +// SERVER-393 Test exists with various empty array and empty object cases. + +t = db.jstests_exists9; +t.drop(); + +// Check existence of missing nested field. +t.save( {a:{}} ); +assert.eq( 1, t.count( {'a.b':{$exists:false}} ) ); +assert.eq( 0, t.count( {'a.b':{$exists:true}} ) ); + +// With index. +t.ensureIndex( {'a.b':1} ); +assert.eq( 1, t.find( {'a.b':{$exists:false}} ).hint( {'a.b':1} ).itcount() ); +assert.eq( 0, t.find( {'a.b':{$exists:true}} ).hint( {'a.b':1} ).itcount() ); + +t.drop(); + +// Check that an empty array 'exists'. +t.save( {} ); +t.save( {a:[]} ); +assert.eq( 1, t.count( {a:{$exists:true}} ) ); +assert.eq( 1, t.count( {a:{$exists:false}} ) ); + +// With index. +t.ensureIndex( {a:1} ); +assert.eq( 1, t.find( {a:{$exists:true}} ).hint( {a:1} ).itcount() ); +assert.eq( 1, t.find( {a:{$exists:false}} ).hint( {a:1} ).itcount() ); +assert.eq( 1, t.find( {a:{$exists:false}} ).hint( {a:1} ).explain().nscanned ); + +t.drop(); + +// Check that an indexed field within an empty array does not exist. +t.save( {a:{'0':1}} ); +t.save( {a:[]} ); +assert.eq( 1, t.count( {'a.0':{$exists:true}} ) ); +assert.eq( 1, t.count( {'a.0':{$exists:false}} ) ); + +// With index. +t.ensureIndex( {'a.0':1} ); +assert.eq( 1, t.find( {'a.0':{$exists:true}} ).hint( {'a.0':1} ).itcount() ); +assert.eq( 1, t.find( {'a.0':{$exists:false}} ).hint( {'a.0':1} ).itcount() ); diff --git a/jstests/find8.js b/jstests/find8.js new file mode 100644 index 0000000..2ec368b --- /dev/null +++ b/jstests/find8.js @@ -0,0 +1,27 @@ +// SERVER-1932 Test unindexed matching of a range that is only valid in a multikey context. + +t = db.jstests_find8; +t.drop(); + +t.save( {a:[1,10]} ); +assert.eq( 1, t.count( { a: { $gt:2,$lt:5} } ) ); + +// Check that we can do a query with 'invalid' range. +assert.eq( 1, t.count( { a: { $gt:5,$lt:2} } ) ); + +t.save( {a:[-1,12]} ); + +// Check that we can do a query with 'invalid' range and sort. +assert.eq( 1, t.find( { a: { $gt:5,$lt:2} } ).sort( {a:1} ).toArray()[ 0 ].a[ 0 ] ); +assert.eq( 2, t.find( { a: { $gt:5,$lt:2} } ).sort( {$natural:-1} ).itcount() ); + +// SERVER-2864 +if( 0 ) { +t.find( { a: { $gt:5,$lt:2} } ).itcount(); +// Check that we can record a plan for an 'invalid' range. +assert( t.find( { a: { $gt:5,$lt:2} } ).explain( true ).oldPlan ); +} + +t.ensureIndex( {b:1} ); +// Check that if we do a table scan of an 'invalid' range in an or clause we don't check subsequent clauses. 
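+// The 'invalid' range {$gt:5, $lt:2} can still match multikey documents (different array elements satisfy each bound), so the whole $or collapses to one unindexed scan and the {b:1} clause is never reached.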
+assert.eq( "BasicCursor", t.find( { $or:[{ a: { $gt:5,$lt:2} }, {b:1}] } ).explain().cursor ); \ No newline at end of file diff --git a/jstests/find_and_modify2.js b/jstests/find_and_modify2.js index 108fc0f..2c8ab5b 100644 --- a/jstests/find_and_modify2.js +++ b/jstests/find_and_modify2.js @@ -8,3 +8,9 @@ assert.eq(out, {_id:1, i:1}); out = t.findAndModify({update: {$inc: {i:1}}, fields: {i:0}}); assert.eq(out, {_id:1, j:0}); + +out = t.findAndModify({update: {$inc: {i:1}}, fields: {_id:0, j:1}}); +assert.eq(out, {j:0}); + +out = t.findAndModify({update: {$inc: {i:1}}, fields: {_id:0, j:1}, 'new': true}); +assert.eq(out, {j:0}); diff --git a/jstests/fsync.js b/jstests/fsync.js index fccd623..134d558 100644 --- a/jstests/fsync.js +++ b/jstests/fsync.js @@ -1,22 +1,21 @@ // test the lock/unlock snapshotting feature a bit -x=db.runCommand({fsync:1,lock:1}); +x=db.runCommand({fsync:1,lock:1}); // not on admin db assert(!x.ok,"D"); -d=db.getSisterDB("admin"); - -x=d.runCommand({fsync:1,lock:1}); +x=db.fsyncLock(); // uses admin automatically assert(x.ok,"C"); -y = d.currentOp(); +y = db.currentOp(); assert(y.fsyncLock,"B"); -z = d.$cmd.sys.unlock.findOne(); +z = db.fsyncUnlock(); +assert( db.currentOp().fsyncLock == null, "A2" ); -// it will take some time to unlock, and unlock does not block and wait for that -// doing a write will make us wait until db is writeable. +// make sure the db is unlocked db.jstests_fsync.insert({x:1}); +db.getLastError(); -assert( d.currentOp().fsyncLock == null, "A" ); +assert( db.currentOp().fsyncLock == null, "A" ); diff --git a/jstests/geo10.js b/jstests/geo10.js new file mode 100644 index 0000000..39da09f --- /dev/null +++ b/jstests/geo10.js @@ -0,0 +1,21 @@ +// Test for SERVER-2746 + +coll = db.geo10 +coll.drop(); + +db.geo10.ensureIndex( { c : '2d', t : 1 }, { min : 0, max : Math.pow( 2, 40 ) } ) +assert( db.getLastError() == null, "B" ) +assert( db.system.indexes.count({ ns : "test.geo10" }) == 2, "A3" ) + +printjson( db.system.indexes.find().toArray() ) + +db.geo10.insert( { c : [ 1, 1 ], t : 1 } ) +assert.eq( db.getLastError(), null, "C" ) + +db.geo10.insert( { c : [ 3600, 3600 ], t : 1 } ) +assert( db.getLastError() == null, "D" ) + +db.geo10.insert( { c : [ 0.001, 0.001 ], t : 1 } ) +assert( db.getLastError() == null, "E" ) + +printjson( db.geo10.find({ c : { $within : { $box : [[0.001, 0.001], [Math.pow(2, 40) - 0.001, Math.pow(2, 40) - 0.001]] } }, t : 1 }).toArray() ) diff --git a/jstests/geo4.js b/jstests/geo4.js index 73b4020..78404ab 100644 --- a/jstests/geo4.js +++ b/jstests/geo4.js @@ -4,7 +4,7 @@ t.drop(); t.insert( { zip : "06525" , loc : [ 41.352964 , 73.01212 ] } ); t.ensureIndex( { loc : "2d" }, { bits : 33 } ); -assert.eq( db.getLastError() , "can't have more than 32 bits in geo index" , "a" ); +assert.eq( db.getLastError() , "bits in geo index must be between 1 and 32" , "a" ); t.ensureIndex( { loc : "2d" }, { bits : 32 } ); assert( !db.getLastError(), "b" ); diff --git a/jstests/geo_array0.js b/jstests/geo_array0.js new file mode 100644 index 0000000..2d69611 --- /dev/null +++ b/jstests/geo_array0.js @@ -0,0 +1,25 @@ +// Make sure the very basics of geo arrays are sane by creating a few multi location docs + +t = db.geoarray +t.drop(); + +t.insert( { zip : "10001", loc : { home : [ 10, 10 ], work : [ 50, 50 ] } } ) +t.insert( { zip : "10002", loc : { home : [ 20, 20 ], work : [ 50, 50 ] } } ) +t.insert( { zip : "10003", loc : { home : [ 30, 30 ], work : [ 50, 50 ] } } ) +assert.isnull( db.getLastError() ) + +t.ensureIndex( { loc : 
"2d", zip : 1 } ); +assert.isnull( db.getLastError() ) +assert.eq( 2, t.getIndexKeys().length ) + +t.insert( { zip : "10004", loc : { home : [ 40, 40 ], work : [ 50, 50 ] } } ) + +assert.isnull( db.getLastError() ) + +// test normal access + +printjson( t.find( { loc : { $within : { $box : [ [ 0, 0 ], [ 45, 45 ] ] } } } ).toArray() ) + +assert.eq( 4, t.find( { loc : { $within : { $box : [ [ 0, 0 ], [ 45, 45 ] ] } } } ).count() ); + +assert.eq( 4, t.find( { loc : { $within : { $box : [ [ 45, 45 ], [ 50, 50 ] ] } } } ).count() ); diff --git a/jstests/geo_array1.js b/jstests/geo_array1.js new file mode 100644 index 0000000..56b7c85 --- /dev/null +++ b/jstests/geo_array1.js @@ -0,0 +1,30 @@ +// Make sure many locations in one doc works, in the form of an array + +t = db.geoarray1 +t.drop(); + +var locObj = [] + +// Add locations everywhere +for ( var i = 0; i < 10; i++ ) { + for ( var j = 0; j < 10; j++ ) { + if ( j % 2 == 0 ) + locObj.push( [ i, j ] ) + else + locObj.push( { x : i, y : j } ) + } +} + +// Add docs with all these locations +for( var i = 0; i < 300; i++ ){ + t.insert( { loc : locObj } ) +} +t.ensureIndex( { loc : "2d" } ) + +// Pull them back +for ( var i = 0; i < 10; i++ ) { + for ( var j = 0; j < 10; j++ ) { + assert.eq( 300, t.find( { loc : { $within : { $box : [ [ i - 0.5, j - 0.5 ], [ i + 0.5, j + 0.5 ] ] } } } ) + .count() ) + } +} diff --git a/jstests/geo_array2.js b/jstests/geo_array2.js new file mode 100644 index 0000000..28cb152 --- /dev/null +++ b/jstests/geo_array2.js @@ -0,0 +1,163 @@ +// Check the semantics of near calls with multiple locations + +t = db.geoarray2 +t.drop(); + +var numObjs = 10; +var numLocs = 100; + +// Test the semantics of near / nearSphere / etc. queries with multiple keys per object + +for( var i = -1; i < 2; i++ ){ + for(var j = -1; j < 2; j++ ){ + + locObj = [] + + if( i != 0 || j != 0 ) + locObj.push( { x : i * 50 + Random.rand(), + y : j * 50 + Random.rand() } ) + locObj.push( { x : Random.rand(), + y : Random.rand() } ) + locObj.push( { x : Random.rand(), + y : Random.rand() } ) + + t.insert({ name : "" + i + "" + j , loc : locObj , type : "A" }) + t.insert({ name : "" + i + "" + j , loc : locObj , type : "B" }) + } +} + +t.ensureIndex({ loc : "2d" , type : 1 }) + +assert.isnull( db.getLastError() ) + +print( "Starting testing phase... ") + +for( var t = 0; t < 2; t++ ){ + +var type = t == 0 ? "A" : "B" + +for( var i = -1; i < 2; i++ ){ + for(var j = -1; j < 2; j++ ){ + + var center = [ i * 50 , j * 50 ] + var count = i == 0 && j == 0 ? 2 * 9 : 1 + var objCount = i == 0 && j == 0 ? 2 : 1 + + // Do near check + + var nearResults = db.runCommand( { geoNear : "geoarray2" , + near : center , + num : count, + query : { type : type } } ).results + //printjson( nearResults ) + + var objsFound = {} + var lastResult = 0; + for( var k = 0; k < nearResults.length; k++ ){ + + // All distances should be small, for the # of results + assert.gt( 1.5 , nearResults[k].dis ) + // Distances should be increasing + assert.lte( lastResult, nearResults[k].dis ) + // Objs should be of the right type + assert.eq( type, nearResults[k].obj.type ) + + lastResult = nearResults[k].dis + + var objKey = "" + nearResults[k].obj._id + + if( objKey in objsFound ) objsFound[ objKey ]++ + else objsFound[ objKey ] = 1 + + } + + // Make sure we found the right objects each time + // Note: Multiple objects could be found for diff distances. 
+ for( var q in objsFound ){ + assert.eq( objCount , objsFound[q] ) + } + + + // Do nearSphere check + + // Earth Radius + var eRad = 6371 + + nearResults = db.geoarray2.find( { loc : { $nearSphere : center , $maxDistance : 500 /* km */ / eRad }, type : type } ).toArray() + + assert.eq( nearResults.length , count ) + + objsFound = {} + lastResult = 0; + for( var k = 0; k < nearResults.length; k++ ){ + var objKey = "" + nearResults[k]._id + if( objKey in objsFound ) objsFound[ objKey ]++ + else objsFound[ objKey ] = 1 + + } + + // Make sure we found the right objects each time + for( var q in objsFound ){ + assert.eq( objCount , objsFound[q] ) + } + + + + // Within results do not return duplicate documents + + var count = i == 0 && j == 0 ? 9 : 1 + var objCount = i == 0 && j == 0 ? 1 : 1 + + // Do within check + objsFound = {} + + var box = [ [center[0] - 1, center[1] - 1] , [center[0] + 1, center[1] + 1] ] + + //printjson( box ) + + var withinResults = db.geoarray2.find({ loc : { $within : { $box : box } } , type : type }).toArray() + + assert.eq( withinResults.length , count ) + + for( var k = 0; k < withinResults.length; k++ ){ + var objKey = "" + withinResults[k]._id + if( objKey in objsFound ) objsFound[ objKey ]++ + else objsFound[ objKey ] = 1 + } + + //printjson( objsFound ) + + // Make sure we found the right objects each time + for( var q in objsFound ){ + assert.eq( objCount , objsFound[q] ) + } + + + // Do within check (circle) + objsFound = {} + + withinResults = db.geoarray2.find({ loc : { $within : { $center : [ center, 1.5 ] } } , type : type }).toArray() + + assert.eq( withinResults.length , count ) + + for( var k = 0; k < withinResults.length; k++ ){ + var objKey = "" + withinResults[k]._id + if( objKey in objsFound ) objsFound[ objKey ]++ + else objsFound[ objKey ] = 1 + } + + // Make sure we found the right objects each time + for( var q in objsFound ){ + assert.eq( objCount , objsFound[q] ) + } + + + + } +} + +} + + + + diff --git a/jstests/geo_borders.js b/jstests/geo_borders.js index 85ffe35..9e8788a 100644 --- a/jstests/geo_borders.js +++ b/jstests/geo_borders.js @@ -1,10 +1,7 @@ - t = db.borders t.drop() -// FIXME: FAILS for all epsilon < 1 -epsilon = 1 -//epsilon = 0.99 +epsilon = 0.0001; // For these tests, *required* that step ends exactly on max min = -1 @@ -12,9 +9,9 @@ max = 1 step = 1 numItems = 0; -for(var x = min; x <= max; x += step){ - for(var y = min; y <= max; y += step){ - t.insert({ loc: { x : x, y : y } }) +for ( var x = min; x <= max; x += step ) { + for ( var y = min; y <= max; y += step ) { + t.insert( { loc : { x : x, y : y } } ) numItems++; } } @@ -23,167 +20,149 @@ overallMin = -1 overallMax = 1 // Create a point index slightly smaller than the points we have -t.ensureIndex({ loc : "2d" }, { max : overallMax - epsilon / 2, min : overallMin + epsilon / 2}) -assert(db.getLastError(), "A1") +t.ensureIndex( { loc : "2d" }, { max : overallMax - epsilon / 2, min : overallMin + epsilon / 2 } ) +assert( db.getLastError() ) -// FIXME: FAILS for all epsilon < 1 // Create a point index only slightly bigger than the points we have -t.ensureIndex({ loc : "2d" }, { max : overallMax + epsilon, min : overallMin - epsilon }) -assert.isnull(db.getLastError(), "A2") - - - - - - - +t.ensureIndex( { loc : "2d" }, { max : overallMax + epsilon, min : overallMin - epsilon } ) +assert.isnull( db.getLastError() ) -//************ +// ************ // Box Tests -//************ - - -/* -// FIXME: Fails w/ non-nice error -// Make sure we can get all points in full bounds 
-assert(numItems == t.find({ loc : { $within : { $box : [[overallMin - epsilon, - overallMin - epsilon], - [overallMax + epsilon, - overallMax + epsilon]] } } }).count(), "B1"); -*/ - -// Make sure an error is thrown if the bounds are bigger than the box itself -// TODO: Do we really want an error in this case? Shouldn't we just clip the box? -try{ - t.findOne({ loc : { $within : { $box : [[overallMin - 2 * epsilon, - overallMin - 2 * epsilon], - [overallMax + 2 * epsilon, - overallMax + 2 * epsilon]] } } }); - assert(false, "B2"); -} -catch(e){} - -//Make sure we can get at least close to the bounds of the index -assert(numItems == t.find({ loc : { $within : { $box : [[overallMin - epsilon / 2, - overallMin - epsilon / 2], - [overallMax + epsilon / 2, - overallMax + epsilon / 2]] } } }).count(), "B3"); - - -//************** -//Circle tests -//************** - -center = (overallMax + overallMin) / 2 -center = [center, center] +// ************ + +// If the bounds are bigger than the box itself, just clip at the borders +assert.eq( numItems, t.find( + { loc : { $within : { $box : [ + [ overallMin - 2 * epsilon, overallMin - 2 * epsilon ], + [ overallMax + 2 * epsilon, overallMax + 2 * epsilon ] ] } } } ).count() ); + +// Check this works also for bounds where only a single dimension is off-bounds +assert.eq( numItems - 5, t.find( + { loc : { $within : { $box : [ + [ overallMin - 2 * epsilon, overallMin - 0.5 * epsilon ], + [ overallMax - epsilon, overallMax - epsilon ] ] } } } ).count() ); + +// Make sure we can get at least close to the bounds of the index +assert.eq( numItems, t.find( + { loc : { $within : { $box : [ + [ overallMin - epsilon / 2, overallMin - epsilon / 2 ], + [ overallMax + epsilon / 2, overallMax + epsilon / 2 ] ] } } } ).count() ); + +// Make sure we can get at least close to the bounds of the index +assert.eq( numItems, t.find( + { loc : { $within : { $box : [ + [ overallMax + epsilon / 2, overallMax + epsilon / 2 ], + [ overallMin - epsilon / 2, overallMin - epsilon / 2 ] ] } } } ).count() ); + +// Check that swapping min/max has good behavior +assert.eq( numItems, t.find( + { loc : { $within : { $box : [ + [ overallMax + epsilon / 2, overallMax + epsilon / 2 ], + [ overallMin - epsilon / 2, overallMin - epsilon / 2 ] ] } } } ).count() ); + +assert.eq( numItems, t.find( + { loc : { $within : { $box : [ + [ overallMax + epsilon / 2, overallMin - epsilon / 2 ], + [ overallMin - epsilon / 2, overallMax + epsilon / 2 ] ] } } } ).count() ); + +// ************** +// Circle tests +// ************** + +center = ( overallMax + overallMin ) / 2 +center = [ center, center ] radius = overallMax -offCenter = [center[0] + radius, center[1] + radius] -onBounds = [offCenter[0] + epsilon, offCenter[1] + epsilon] -offBounds = [onBounds[0] + epsilon, onBounds[1] + epsilon] - +offCenter = [ center[0] + radius, center[1] + radius ] +onBounds = [ offCenter[0] + epsilon, offCenter[1] + epsilon ] +offBounds = [ onBounds[0] + epsilon, onBounds[1] + epsilon ] +onBoundsNeg = [ -onBounds[0], -onBounds[1] ] -//Make sure we can get all points when radius is exactly at full bounds -assert(0 < t.find({ loc : { $within : { $center : [center, radius + epsilon] } } }).count(), "C1"); +// Make sure we can get all points when radius is exactly at full bounds +assert.lt( 0, t.find( { loc : { $within : { $center : [ center, radius + epsilon ] } } } ).count() ); -//Make sure we can get points when radius is over full bounds -assert(0 < t.find({ loc : { $within : { $center : [center, radius + 2 * epsilon] } } 
}).count(), "C2"); +// Make sure we can get points when radius is over full bounds +assert.lt( 0, t.find( { loc : { $within : { $center : [ center, radius + 2 * epsilon ] } } } ).count() ); -//Make sure we can get points when radius is over full bounds, off-centered -assert(0 < t.find({ loc : { $within : { $center : [offCenter, radius + 2 * epsilon] } } }).count(), "C3"); +// Make sure we can get points when radius is over full bounds, off-centered +assert.lt( 0, t.find( { loc : { $within : { $center : [ offCenter, radius + 2 * epsilon ] } } } ).count() ); -//Make sure we get correct corner point when center is in bounds +// Make sure we get correct corner point when center is in bounds // (x bounds wrap, so could get other corner) -cornerPt = t.findOne({ loc : { $within : { $center : [offCenter, step / 2] } } }); -assert(cornerPt.loc.y == overallMax, "C4") +cornerPt = t.findOne( { loc : { $within : { $center : [ offCenter, step / 2 ] } } } ); +assert.eq( cornerPt.loc.y, overallMax ) -/* -// FIXME: FAILS, returns opposite corner // Make sure we get correct corner point when center is on bounds -cornerPt = t.findOne({ loc : { $within : { $center : [onBounds, - Math.sqrt(2 * epsilon * epsilon) + (step / 2) ] } } }); -assert(cornerPt.loc.y == overallMax, "C5") -*/ +// NOTE: Only valid points on MIN bounds +cornerPt = t + .findOne( { loc : { $within : { $center : [ onBoundsNeg, Math.sqrt( 2 * epsilon * epsilon ) + ( step / 2 ) ] } } } ); +assert.eq( cornerPt.loc.y, overallMin ) -// TODO: Handle gracefully? // Make sure we can't get corner point when center is over bounds -try{ - t.findOne({ loc : { $within : { $center : [offBounds, - Math.sqrt(8 * epsilon * epsilon) + (step / 2) ] } } }); - assert(false, "C6") +try { + t.findOne( { loc : { $within : { $center : [ offBounds, Math.sqrt( 8 * epsilon * epsilon ) + ( step / 2 ) ] } } } ); + assert( false ) +} catch (e) { } -catch(e){} - - - - - - -//*********** -//Near tests -//*********** - -//Make sure we can get all nearby points to point in range -assert(t.find({ loc : { $near : offCenter } }).next().loc.y == overallMax, - "D1"); - -/* -// FIXME: FAILS, returns opposite list -// Make sure we can get all nearby points to point on boundary -assert(t.find({ loc : { $near : onBounds } }).next().loc.y == overallMax, - "D2"); -*/ - -//TODO: Could this work? 
-//Make sure we can't get all nearby points to point over boundary -try{ - t.findOne({ loc : { $near : offBounds } }) - assert(false, "D3") +// Make sure we can't get corner point when center is on max bounds +try { + t.findOne( { loc : { $within : { $center : [ onBounds, Math.sqrt( 8 * epsilon * epsilon ) + ( step / 2 ) ] } } } ); + assert( false ) +} catch (e) { } -catch(e){} - -/* -// FIXME: FAILS, returns only single point -//Make sure we can get all nearby points within one step (4 points in top corner) -assert(4 == t.find({ loc : { $near : offCenter, $maxDistance : step * 1.9 } }).count(), - "D4"); -*/ +// *********** +// Near tests +// *********** +// Make sure we can get all nearby points to point in range +assert.eq( overallMax, t.find( { loc : { $near : offCenter } } ).next().loc.y ); -//************** -//Command Tests -//************** +// Make sure we can get all nearby points to point on boundary +assert.eq( overallMin, t.find( { loc : { $near : onBoundsNeg } } ).next().loc.y ); +// Make sure we can't get all nearby points to point over boundary +try { + t.findOne( { loc : { $near : offBounds } } ) + assert( false ) +} catch (e) { +} +// Make sure we can't get all nearby points to point on max boundary +try { + t.findOne( { loc : { $near : onBoundsNeg } } ) + assert( false ) +} catch (e) { +} -//Make sure we can get all nearby points to point in range -assert(db.runCommand({ geoNear : "borders", near : offCenter }).results[0].obj.loc.y == overallMax, - "E1"); +// Make sure we can get all nearby points within one step (4 points in top +// corner) +assert.eq( 4, t.find( { loc : { $near : offCenter, $maxDistance : step * 1.9 } } ).count() ); +// ************** +// Command Tests +// ************** +// Make sure we can get all nearby points to point in range +assert.eq( overallMax, db.runCommand( { geoNear : "borders", near : offCenter } ).results[0].obj.loc.y ); -/* -// FIXME: FAILS, returns opposite list -//Make sure we can get all nearby points to point on boundary -assert(db.runCommand({ geoNear : "borders", near : onBounds }).results[0].obj.loc.y == overallMax, - "E2"); -*/ +// Make sure we can get all nearby points to point on boundary +assert.eq( overallMin, db.runCommand( { geoNear : "borders", near : onBoundsNeg } ).results[0].obj.loc.y ); -//TODO: Could this work? 
-//Make sure we can't get all nearby points to point over boundary -try{ - db.runCommand({ geoNear : "borders", near : offBounds }).results.length - assert(false, "E3") +// Make sure we can't get all nearby points to point over boundary +try { + db.runCommand( { geoNear : "borders", near : offBounds } ).results.length + assert( false ) +} catch (e) { } -catch(e){} - - -/* -// FIXME: Fails, returns one point -//Make sure we can get all nearby points within one step (4 points in top corner) -assert(4 == db.runCommand({ geoNear : "borders", near : offCenter, maxDistance : step * 1.5 }).results.length, - "E4"); -*/ - +// Make sure we can't get all nearby points to point on max boundary +try { + db.runCommand( { geoNear : "borders", near : onBounds } ).results.length + assert( false ) +} catch (e) { +} +// Make sure we can get all nearby points within one step (4 points in top +// corner) +assert.eq( 4, db.runCommand( { geoNear : "borders", near : offCenter, maxDistance : step * 1.5 } ).results.length ); diff --git a/jstests/geo_center_sphere2.js b/jstests/geo_center_sphere2.js new file mode 100644 index 0000000..c9c5fbb --- /dev/null +++ b/jstests/geo_center_sphere2.js @@ -0,0 +1,158 @@ +// +// Tests the error handling of spherical queries +// along with multi-location documents. +// This is necessary since the error handling must manage +// multiple documents, and so requires simultaneous testing. +// + +var numTests = 30 + +for ( var test = 0; test < numTests; test++ ) { + + //var fixedTest = 6017 + //if( fixedTest ) test = fixedTest + + Random.srand( 1337 + test ); + + var radius = 5000 * Random.rand() // km + radius = radius / 6371 // radians + var numDocs = Math.floor( 400 * Random.rand() ) + // TODO: Wrapping uses the error value to figure out what would overlap... + var bits = Math.floor( 5 + Random.rand() * 28 ) + var maxPointsPerDoc = 50 + + t = db.sphere + + var randomPoint = function() { + return [ Random.rand() * 360 - 180, Random.rand() * 180 - 90 ]; + } + + // Get a start point that doesn't require wrapping + // TODO: Are we a bit too aggressive with wrapping issues? 
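+ // The do/while below retries random start points until the $centerSphere probe runs without throwing, i.e. until the chosen point and radius avoid longitude-wrapping errors.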
+ var startPoint + var ex = null + do { + + t.drop() + startPoint = randomPoint() + t.ensureIndex( { loc : "2d" }, { bits : bits } ) + + try { + // Check for wrapping issues + t.find( { loc : { $within : { $centerSphere : [ startPoint, radius ] } } } ).toArray() + ex = null + } catch (e) { + ex = e + } + } while (ex) + + var pointsIn = 0 + var pointsOut = 0 + var docsIn = 0 + var docsOut = 0 + var totalPoints = 0 + + //var point = randomPoint() + + for ( var i = 0; i < numDocs; i++ ) { + + var numPoints = Math.floor( Random.rand() * maxPointsPerDoc + 1 ) + var docIn = false + var multiPoint = [] + + totalPoints += numPoints + + for ( var p = 0; p < numPoints; p++ ) { + var point = randomPoint() + multiPoint.push( point ) + + if ( Geo.sphereDistance( startPoint, point ) <= radius ) { + pointsIn++ + docIn = true + } else { + pointsOut++ + } + } + + t.insert( { loc : multiPoint } ) + + if ( docIn ) + docsIn++ + else + docsOut++ + + } + + printjson( { test: test, + radius : radius, bits : bits, numDocs : numDocs, pointsIn : pointsIn, docsIn : docsIn, pointsOut : pointsOut, + docsOut : docsOut } ) + + assert.isnull( db.getLastError() ) + assert.eq( docsIn + docsOut, numDocs ) + assert.eq( pointsIn + pointsOut, totalPoints ) + + // $centerSphere + assert.eq( docsIn, t.find( { loc : { $within : { $centerSphere : [ startPoint, radius ] } } } ).count() ) + + // $nearSphere + var results = t.find( { loc : { $nearSphere : startPoint, $maxDistance : radius } } ).limit( 2 * pointsIn ) + .toArray() + + assert.eq( pointsIn, results.length ) + + var distance = 0; + for ( var i = 0; i < results.length; i++ ) { + + var minNewDistance = radius + 1 + for( var j = 0; j < results[i].loc.length; j++ ){ + var newDistance = Geo.sphereDistance( startPoint, results[i].loc[j] ) + if( newDistance < minNewDistance && newDistance >= distance ) minNewDistance = newDistance + } + + //print( "Dist from : " + results[i].loc[j] + " to " + startPoint + " is " + // + minNewDistance + " vs " + radius ) + + assert.lte( minNewDistance, radius ) + assert.gte( minNewDistance, distance ) + distance = minNewDistance + + } + + // geoNear + var results = db.runCommand( { + geoNear : "sphere", near : startPoint, maxDistance : radius, num : 2 * pointsIn, spherical : true } ).results + + /* + printjson( results ); + + for ( var j = 0; j < results[0].obj.loc.length; j++ ) { + var newDistance = Geo.sphereDistance( startPoint, results[0].obj.loc[j] ) + if( newDistance <= radius ) print( results[0].obj.loc[j] + " : " + newDistance ) + } + */ + + assert.eq( pointsIn, results.length ) + + var distance = 0; + for ( var i = 0; i < results.length; i++ ) { + var retDistance = results[i].dis + + // print( "Dist from : " + results[i].loc + " to " + startPoint + " is " + // + retDistance + " vs " + radius ) + + var distInObj = false + for ( var j = 0; j < results[i].obj.loc.length && distInObj == false; j++ ) { + var newDistance = Geo.sphereDistance( startPoint, results[i].obj.loc[j] ) + distInObj = ( newDistance >= retDistance - 0.0001 && newDistance <= retDistance + 0.0001 ) + } + + assert( distInObj ) + assert.lte( retDistance, radius ) + assert.gte( retDistance, distance ) + distance = retDistance + } + + //break; +} + + diff --git a/jstests/geo_distinct.js b/jstests/geo_distinct.js new file mode 100644 index 0000000..60e0d15 --- /dev/null +++ b/jstests/geo_distinct.js @@ -0,0 +1,16 @@ +// Test distinct with geo queries SERVER-2135 + +t = db.commits +t.drop() + +t.save( { _id : ObjectId( "4ce63ec2f360622431000013" ), loc : [ 55.59664, 13.00156 ], 
author : "FredrikL" } ) + +printjson( db.runCommand( { distinct : 'commits', key : 'loc' } ) ) +assert.isnull( db.getLastError() ) + +t.ensureIndex( { loc : '2d' } ) + +printjson( t.getIndexes() ) + +printjson( db.runCommand( { distinct : 'commits', key : 'loc' } ) ) +assert.isnull( db.getLastError() ) \ No newline at end of file diff --git a/jstests/geo_fiddly_box.js b/jstests/geo_fiddly_box.js new file mode 100644 index 0000000..2a9cf49 --- /dev/null +++ b/jstests/geo_fiddly_box.js @@ -0,0 +1,44 @@ +// Reproduces simple test for SERVER-2832 + +// The setup to reproduce was/is to create a set of points where the +// "expand" portion of the geo-lookup expands the 2d range in only one +// direction (so points are required on either side of the expanding range) + +db.geo_fiddly_box.drop(); +db.geo_fiddly_box.ensureIndex({ loc : "2d" }) + +db.geo_fiddly_box.insert({ "loc" : [3, 1] }) +db.geo_fiddly_box.insert({ "loc" : [3, 0.5] }) +db.geo_fiddly_box.insert({ "loc" : [3, 0.25] }) +db.geo_fiddly_box.insert({ "loc" : [3, -0.01] }) +db.geo_fiddly_box.insert({ "loc" : [3, -0.25] }) +db.geo_fiddly_box.insert({ "loc" : [3, -0.5] }) +db.geo_fiddly_box.insert({ "loc" : [3, -1] }) + +// OK! +print( db.geo_fiddly_box.count() ) +assert.eq( 7, db.geo_fiddly_box.count({ "loc" : { "$within" : { "$box" : [ [2, -2], [46, 2] ] } } }), "Not all locations found!" ); + + +// Test normal lookup of a small square of points as a sanity check. + +epsilon = 0.0001; +min = -1 +max = 1 +step = 1 +numItems = 0; + +db.geo_fiddly_box2.drop() +db.geo_fiddly_box2.ensureIndex({ loc : "2d" }, { max : max + epsilon / 2, min : min - epsilon / 2 }) + +for(var x = min; x <= max; x += step){ + for(var y = min; y <= max; y += step){ + db.geo_fiddly_box2.insert({ "loc" : { x : x, y : y } }) + numItems++; + } +} + +assert.eq( numItems, db.geo_fiddly_box2.count({ loc : { $within : { $box : [[min - epsilon / 3, + min - epsilon / 3], + [max + epsilon / 3, + max + epsilon / 3]] } } }), "Not all locations found!"); diff --git a/jstests/geo_fiddly_box2.js b/jstests/geo_fiddly_box2.js new file mode 100644 index 0000000..0588abf --- /dev/null +++ b/jstests/geo_fiddly_box2.js @@ -0,0 +1,32 @@ +// Reproduces simple test for SERVER-2115 + +// The setup to reproduce is to create a set of points and a really big bounds so that we are required to do +// exact lookups on the points to get correct results. 
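+// (Rough intuition, an illustrative sketch rather than part of the
+// reproduction: with a fixed bit depth, one 2d grid cell spans roughly
+//     cellWidth = ( max - min ) / Math.pow( 2, bits )
+// so bounds like { min : -1000000000, max : 1000000000 } at the assumed
+// default of 26 bits give cells ~30 units wide; every point in this test
+// then lands in a single cell and only exact per-document comparisons can
+// tell them apart.)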
+ +t = db.geo_fiddly_box2 +t.drop() + +t.insert( { "letter" : "S", "position" : [ -3, 0 ] } ) +t.insert( { "letter" : "C", "position" : [ -2, 0 ] } ) +t.insert( { "letter" : "R", "position" : [ -1, 0 ] } ) +t.insert( { "letter" : "A", "position" : [ 0, 0 ] } ) +t.insert( { "letter" : "B", "position" : [ 1, 0 ] } ) +t.insert( { "letter" : "B", "position" : [ 2, 0 ] } ) +t.insert( { "letter" : "L", "position" : [ 3, 0 ] } ) +t.insert( { "letter" : "E", "position" : [ 4, 0 ] } ) + +t.ensureIndex( { position : "2d" } ) +result = t.find( { "position" : { "$within" : { "$box" : [ [ -3, -1 ], [ 0, 1 ] ] } } } ) +assert.eq( 4, result.count() ) + +t.dropIndex( { position : "2d" } ) +t.ensureIndex( { position : "2d" }, { min : -10000000, max : 10000000 } ) + +result = t.find( { "position" : { "$within" : { "$box" : [ [ -3, -1 ], [ 0, 1 ] ] } } } ) +assert.eq( 4, result.count() ) + +t.dropIndex( { position : "2d" } ) +t.ensureIndex( { position : "2d" }, { min : -1000000000, max : 1000000000 } ) + +result = t.find( { "position" : { "$within" : { "$box" : [ [ -3, -1 ], [ 0, 1 ] ] } } } ) +assert.eq( 4, result.count() ) diff --git a/jstests/geo_group.js b/jstests/geo_group.js new file mode 100644 index 0000000..4e038f9 --- /dev/null +++ b/jstests/geo_group.js @@ -0,0 +1,35 @@ +t = db.geo_group; +t.drop(); + +n = 1; +for ( var x=-100; x<100; x+=2 ){ + for ( var y=-100; y<100; y+=2 ){ + t.insert( { _id : n++ , loc : [ x , y ] } ) + } +} + +t.ensureIndex( { loc : "2d" } ); + +// Test basic count with $near +assert.eq(t.find().count(), 10000); +assert.eq(t.find( { loc : { $within : {$center : [[56,8], 10]}}}).count(), 81); +assert.eq(t.find( { loc : { $near : [56, 8, 10] } } ).count(), 81); + +// Test basic group that effectively does a count +assert.eq( + t.group( { + reduce : function (obj, prev) { prev.sums = { count : prev.sums.count + 1} }, + initial : { sums:{count:0} } } + ), + [ { "sums" : { "count" : 10000 } } ] +); + +// Test basic group + $near that does a count +assert.eq( + t.group( { + reduce : function (obj, prev) { prev.sums = { count : prev.sums.count + 1} }, + initial : { sums:{count:0} }, + cond : { loc : { $near : [56, 8, 10] } } } + ), + [ { "sums" : { "count" : 81 } } ] +); diff --git a/jstests/geo_mapreduce.js b/jstests/geo_mapreduce.js new file mode 100644 index 0000000..a6ecf76 --- /dev/null +++ b/jstests/geo_mapreduce.js @@ -0,0 +1,56 @@ +// Test script from SERVER-1742 + +// MongoDB test script for mapreduce with geo query + +// setup test collection +db.apples.drop() +db.apples.insert( { "geo" : { "lat" : 32.68331909, "long" : 69.41610718 }, "apples" : 5 } ); +db.apples.insert( { "geo" : { "lat" : 35.01860809, "long" : 70.92027283 }, "apples" : 2 } ); +db.apples.insert( { "geo" : { "lat" : 31.11639023, "long" : 64.19970703 }, "apples" : 11 } ); +db.apples.insert( { "geo" : { "lat" : 32.64500046, "long" : 69.36251068 }, "apples" : 4 } ); +db.apples.insert( { "geo" : { "lat" : 33.23638916, "long" : 69.81360626 }, "apples" : 9 } ); +db.apples.ensureIndex( { "geo" : "2d" } ); + +center = [ 32.68, 69.41 ]; +radius = 10 / 111; // 10km; 1 arcdegree ~= 111km +geo_query = { geo : { '$within' : { '$center' : [ center, radius ] } } }; + +// geo query on collection works fine +res = db.apples.find( geo_query ); +assert.eq( 2, res.count() ); + +// map function +m = function() { + emit( null, { "apples" : this.apples } ); +}; + +// reduce function +r = function(key, values) { + var total = 0; + for ( var i = 0; i < values.length; i++ ) { + total += values[i].apples; + } + return { "apples" : 
total }; +}; + +// mapreduce without geo query works fine +res = db.apples.mapReduce( m, r, { out : { inline : 1 } } ); + +printjson( res ) +total = res.results[0]; +assert.eq( 31, total.value.apples ); + +// mapreduce with regular query works fine too +res = db.apples.mapReduce( m, r, { out : { inline : 1 }, query : { apples : { '$lt' : 9 } } } ); +total = res.results[0]; +assert.eq( 11, total.value.apples ); + +// mapreduce with geo query gives error on mongodb version 1.6.2 +// uncaught exception: map reduce failed: { +// "assertion" : "manual matcher config not allowed", +// "assertionCode" : 13285, +// "errmsg" : "db assertion failure", +// "ok" : 0 } +res = db.apples.mapReduce( m, r, { out : { inline : 1 }, query : geo_query } ); +total = res.results[0]; +assert.eq( 9, total.value.apples ); diff --git a/jstests/geo_mapreduce2.js b/jstests/geo_mapreduce2.js new file mode 100644 index 0000000..9c39345 --- /dev/null +++ b/jstests/geo_mapreduce2.js @@ -0,0 +1,36 @@ +// Geo mapreduce 2 from SERVER-3478 + +var coll = db.geoMR2 +coll.drop() + +for( var i = 0; i < 300; i++ ) + coll.insert({ i : i, location : [ 10, 20 ] }) + +coll.ensureIndex({ location : "2d" }) + +// map function +m = function() { + emit( null, { count : this.i } ) +} + +// reduce function +r = function( key, values ) { + + var total = 0 + for ( var i = 0; i < values.length; i++ ) { + total += values[i].count + } + + return { count : total } +}; + +try{ coll.mapReduce( m, r, + { out : coll.getName() + "_mr", + sort : { _id : 1 }, + query : { 'location' : { $within : { $centerSphere : [[ 10, 20 ], 0.01 ] } } } }) + +} +catch( e ){ + // This should occur, since we can't in-mem sort for mreduce + printjson( e ) +} diff --git a/jstests/geo_multinest0.js b/jstests/geo_multinest0.js new file mode 100644 index 0000000..68e6095 --- /dev/null +++ b/jstests/geo_multinest0.js @@ -0,0 +1,63 @@ +// Make sure nesting of location arrays also works. 
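+// (Orientation sketch, hedged: the compound index built below on
+// { "data.loc" : "2d", zip : 1 } is expected to be multikey, emitting one
+// geo key per embedded location, e.g. the document
+//     { zip : "10001", data : [ { loc : [ 10, 10 ], type : "home" },
+//                               { loc : [ 50, 50 ], type : "work" } ] }
+// contributes two entries, so it can satisfy a $within box through either
+// its home or its work location.)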
+ +t = db.geonest +t.drop(); + +t.insert( { zip : "10001", data : [ { loc : [ 10, 10 ], type : "home" }, + { loc : [ 50, 50 ], type : "work" } ] } ) +t.insert( { zip : "10002", data : [ { loc : [ 20, 20 ], type : "home" }, + { loc : [ 50, 50 ], type : "work" } ] } ) +t.insert( { zip : "10003", data : [ { loc : [ 30, 30 ], type : "home" }, + { loc : [ 50, 50 ], type : "work" } ] } ) +assert.isnull( db.getLastError() ) + +t.ensureIndex( { "data.loc" : "2d", zip : 1 } ); +assert.isnull( db.getLastError() ) +assert.eq( 2, t.getIndexKeys().length ) + +t.insert( { zip : "10004", data : [ { loc : [ 40, 40 ], type : "home" }, + { loc : [ 50, 50 ], type : "work" } ] } ) +assert.isnull( db.getLastError() ) + +// test normal access + +printjson( t.find( { "data.loc" : { $within : { $box : [ [ 0, 0 ], [ 45, 45 ] ] } } } ).toArray() ) + +assert.eq( 4, t.find( { "data.loc" : { $within : { $box : [ [ 0, 0 ], [ 45, 45 ] ] } } } ).count() ); + +assert.eq( 4, t.find( { "data.loc" : { $within : { $box : [ [ 45, 45 ], [ 50, 50 ] ] } } } ).count() ); + + + + + +// Try a complex nesting + +t = db.geonest +t.drop(); + +t.insert( { zip : "10001", data : [ { loc : [ [ 10, 10 ], { lat : 50, long : 50 } ], type : "home" } ] } ) +t.insert( { zip : "10002", data : [ { loc : [ 20, 20 ], type : "home" }, + { loc : [ 50, 50 ], type : "work" } ] } ) +t.insert( { zip : "10003", data : [ { loc : [ { x : 30, y : 30 }, [ 50, 50 ] ], type : "home" } ] } ) +assert.isnull( db.getLastError() ) + +t.ensureIndex( { "data.loc" : "2d", zip : 1 } ); +assert.isnull( db.getLastError() ) +assert.eq( 2, t.getIndexKeys().length ) + +t.insert( { zip : "10004", data : [ { loc : [ 40, 40 ], type : "home" }, + { loc : [ 50, 50 ], type : "work" } ] } ) + + +assert.isnull( db.getLastError() ) + +// test normal access +printjson( t.find( { "data.loc" : { $within : { $box : [ [ 0, 0 ], [ 45, 45 ] ] } } } ).toArray() ) + +assert.eq( 4, t.find( { "data.loc" : { $within : { $box : [ [ 0, 0 ], [ 45, 45 ] ] } } } ).count() ); + +assert.eq( 4, t.find( { "data.loc" : { $within : { $box : [ [ 45, 45 ], [ 50, 50 ] ] } } } ).count() ); + + + diff --git a/jstests/geo_multinest1.js b/jstests/geo_multinest1.js new file mode 100644 index 0000000..7754f24 --- /dev/null +++ b/jstests/geo_multinest1.js @@ -0,0 +1,37 @@ +// Test distance queries with interleaved distances + +t = db.multinest +t.drop(); + +t.insert( { zip : "10001", data : [ { loc : [ 10, 10 ], type : "home" }, + { loc : [ 29, 29 ], type : "work" } ] } ) +t.insert( { zip : "10002", data : [ { loc : [ 20, 20 ], type : "home" }, + { loc : [ 39, 39 ], type : "work" } ] } ) +t.insert( { zip : "10003", data : [ { loc : [ 30, 30 ], type : "home" }, + { loc : [ 49, 49 ], type : "work" } ] } ) +assert.isnull( db.getLastError() ) + +t.ensureIndex( { "data.loc" : "2d", zip : 1 } ); +assert.isnull( db.getLastError() ) +assert.eq( 2, t.getIndexKeys().length ) + +t.insert( { zip : "10004", data : [ { loc : [ 40, 40 ], type : "home" }, + { loc : [ 59, 59 ], type : "work" } ] } ) +assert.isnull( db.getLastError() ) + +// test normal access + +var result = t.find({ "data.loc" : { $near : [0, 0] } }).toArray(); + +printjson( result ) + +assert.eq( 8, result.length ) + +var order = [ 1, 2, 1, 3, 2, 4, 3, 4 ] + +for( var i = 0; i < result.length; i++ ){ + assert.eq( "1000" + order[i], result[i].zip ) +} + + + diff --git a/jstests/geo_oob_sphere.js b/jstests/geo_oob_sphere.js new file mode 100644 index 0000000..d493f36 --- /dev/null +++ b/jstests/geo_oob_sphere.js @@ -0,0 +1,42 @@ +// +// Ensures spherical queries 
report invalid latitude values in points and center positions +// + +t = db.geooobsphere +t.drop(); + +t.insert({ loc : { x : 30, y : 89 } }) +t.insert({ loc : { x : 30, y : 89 } }) +t.insert({ loc : { x : 30, y : 89 } }) +t.insert({ loc : { x : 30, y : 89 } }) +t.insert({ loc : { x : 30, y : 89 } }) +t.insert({ loc : { x : 30, y : 89 } }) +t.insert({ loc : { x : 30, y : 91 } }) + +t.ensureIndex({ loc : "2d" }) +assert.isnull( db.getLastError() ) + +t.find({ loc : { $nearSphere : [ 30, 91 ], $maxDistance : 0.25 } }).count() +var err = db.getLastError() +assert( err != null ) +printjson( err ) + +t.find({ loc : { $nearSphere : [ 30, 89 ], $maxDistance : 0.25 } }).count() +var err = db.getLastError() +assert( err != null ) +printjson( err ) + +t.find({ loc : { $within : { $centerSphere : [[ -180, -91 ], 0.25] } } }).count() +var err = db.getLastError() +assert( err != null ) +printjson( err ) + +db.runCommand({ geoNear : "geooobsphere", near : [179, -91], maxDistance : 0.25, spherical : true }) +var err = db.getLastError() +assert( err != null ) +printjson( err ) + +db.runCommand({ geoNear : "geooobsphere", near : [30, 89], maxDistance : 0.25, spherical : true }) +var err = db.getLastError() +assert( err != null ) +printjson( err ) \ No newline at end of file diff --git a/jstests/geo_poly_edge.js b/jstests/geo_poly_edge.js new file mode 100644 index 0000000..31a0849 --- /dev/null +++ b/jstests/geo_poly_edge.js @@ -0,0 +1,22 @@ +// +// Tests polygon edge cases +// + +var coll = db.getCollection( 'jstests_geo_poly_edge' ) +coll.drop(); + +coll.ensureIndex({ loc : "2d" }) + +coll.insert({ loc : [10, 10] }) +coll.insert({ loc : [10, -10] }) + +assert.eq( coll.find({ loc : { $within : { $polygon : [[ 10, 10 ], [ 10, 10 ], [ 10, -10 ]] } } }).itcount(), 2 ) + +assert.eq( coll.find({ loc : { $within : { $polygon : [[ 10, 10 ], [ 10, 10 ], [ 10, 10 ]] } } }).itcount(), 1 ) + + +coll.insert({ loc : [179, 0] }) +coll.insert({ loc : [0, 179] }) + +assert.eq( coll.find({ loc : { $within : { $polygon : [[0, 0], [1000, 0], [1000, 1000], [0, 1000]] } } }).itcount(), 3 ) + diff --git a/jstests/geo_poly_line.js b/jstests/geo_poly_line.js new file mode 100644 index 0000000..aca77b6 --- /dev/null +++ b/jstests/geo_poly_line.js @@ -0,0 +1,17 @@ +// Test that weird polygons work SERVER-3725 + +t = db.geo_polygon5; +t.drop(); + +t.insert({loc:[0,0]}) +t.insert({loc:[1,0]}) +t.insert({loc:[2,0]}) +t.insert({loc:[3,0]}) +t.insert({loc:[4,0]}) + +t.ensureIndex( { loc : "2d" } ); + +printjson( t.find({ loc: { "$within": { "$polygon" : [[0,0], [2,0], [4,0]] } } }).toArray() ) + +assert.eq( 5, t.find({ loc: { "$within": { "$polygon" : [[0,0], [2,0], [4,0]] } } }).itcount() ) + diff --git a/jstests/geo_polygon1.js b/jstests/geo_polygon1.js new file mode 100644 index 0000000..4b7427a --- /dev/null +++ b/jstests/geo_polygon1.js @@ -0,0 +1,74 @@ +// +// Tests for N-dimensional polygon querying +// + +t = db.geo_polygon1; +t.drop(); + +num = 0; +for ( x=1; x < 9; x++ ){ + for ( y= 1; y < 9; y++ ){ + o = { _id : num++ , loc : [ x , y ] }; + t.save( o ); + } +} + +t.ensureIndex( { loc : "2d" } ); + +triangle = [[0,0], [1,1], [0,2]]; + +// Look at only a small slice of the data within a triangle +assert.eq( 1 , t.find( { loc: { "$within": { "$polygon" : triangle }}} ).count() , "Triangle Test" ); + +boxBounds = [ [0,0], [0,10], [10,10], [10,0] ]; + +assert.eq( num , t.find( { loc : { "$within" : { "$polygon" : boxBounds } } } ).count() , "Bounding Box Test" ); + +//Make sure we can add object-based polygons +assert.eq( num, 
t.find( { loc : { $within : { $polygon : { a : [-10, -10], b : [-10, 10], c : [10, 10], d : [10, -10] } } } } ).count() ) + +// Look in a box much bigger than the one we have data in +boxBounds = [[-100,-100], [-100, 100], [100,100], [100,-100]]; +assert.eq( num , t.find( { loc : { "$within" : { "$polygon" : boxBounds } } } ).count() , "Big Bounding Box Test" ); + +t.drop(); + +pacman = [ + [0,2], [0,4], [2,6], [4,6], // Head + [6,4], [4,3], [6,2], // Mouth + [4,0], [2,0] // Bottom + ]; + +t.save({loc: [1,3] }); // Add a point that's in +t.ensureIndex( { loc : "2d" } ); +assert.isnull( db.getLastError() ) + +assert.eq( 1 , t.find({loc : { $within : { $polygon : pacman }}} ).count() , "Pacman single point" ); + +t.save({ loc : [5, 3] }) // Add a point that's out right in the mouth opening +t.save({ loc : [3, 7] }) // Add a point above the center of the head +t.save({ loc : [3,-1] }) // Add a point below the center of the bottom + +assert.eq( 1 , t.find({loc : { $within : { $polygon : pacman }}} ).count() , "Pacman double point" ); + +// Make sure we can't add bad polygons +okay = true +try{ + t.find( { loc : { $within : { $polygon : [1, 2] } } } ).toArray() + okay = false +} +catch(e){} +assert(okay) +try{ + t.find( { loc : { $within : { $polygon : [[1, 2]] } } } ).toArray() + okay = false +} +catch(e){} +assert(okay) +try{ + t.find( { loc : { $within : { $polygon : [[1, 2], [2, 3]] } } } ).toArray() + okay = false +} +catch(e){} +assert(okay) + diff --git a/jstests/geo_polygon2.js b/jstests/geo_polygon2.js new file mode 100644 index 0000000..617801b --- /dev/null +++ b/jstests/geo_polygon2.js @@ -0,0 +1,266 @@ +// +// More tests for N-dimensional polygon querying +// + +// Create a polygon of some shape (no holes) +// using turtle graphics. Basically, will look like a very contorted octopus (quad-pus?) shape. +// There are no holes, but some edges will probably touch. + +var numTests = 10 + +for ( var test = 0; test < numTests; test++ ) { + + Random.srand( 1337 + test ); + + var numTurtles = 4; + var gridSize = [ 40, 40 ]; + var turtleSteps = 500; + var bounds = [ Random.rand() * -1000000 + 0.00001, Random.rand() * 1000000 + 0.00001 ] + var rotation = Math.PI * Random.rand(); + var bits = Math.floor( Random.rand() * 32 ); + + printjson( { test : test, rotation : rotation, bits : bits }) + + var rotatePoint = function( x, y ) { + + if( y == undefined ){ + y = x[1] + x = x[0] + } + + xp = x * Math.cos( rotation ) - y * Math.sin( rotation ) + yp = y * Math.cos( rotation ) + x * Math.sin( rotation ) + + var scaleX = (bounds[1] - bounds[0]) / 360 + var scaleY = (bounds[1] - bounds[0]) / 360 + + x *= scaleX + y *= scaleY + + return [xp, yp] + + } + + + var grid = [] + for ( var i = 0; i < gridSize[0]; i++ ) { + grid.push( new Array( gridSize[1] ) ) + } + + grid.toString = function() { + + var gridStr = ""; + for ( var j = grid[0].length - 1; j >= -1; j-- ) { + for ( var i = 0; i < grid.length; i++ ) { + if ( i == 0 ) + gridStr += ( j == -1 ? " " : ( j % 10) ) + ": " + if ( j != -1 ) + gridStr += "[" + ( grid[i][j] != undefined ? grid[i][j] : " " ) + "]" + else + gridStr += " " + ( i % 10 ) + " " + } + gridStr += "\n" + } + + return gridStr; + } + + var turtles = [] + for ( var i = 0; i < numTurtles; i++ ) { + + var up = ( i % 2 == 0 ) ? i - 1 : 0; + var left = ( i % 2 == 1 ) ? 
( i - 1 ) - 1 : 0; + + turtles[i] = [ + [ Math.floor( gridSize[0] / 2 ), Math.floor( gridSize[1] / 2 ) ], + [ Math.floor( gridSize[0] / 2 ) + left, Math.floor( gridSize[1] / 2 ) + up ] ]; + + grid[turtles[i][1][0]][turtles[i][1][1]] = i + + } + + grid[Math.floor( gridSize[0] / 2 )][Math.floor( gridSize[1] / 2 )] = "S" + + // print( grid.toString() ) + + var pickDirections = function() { + + var up = Math.floor( Random.rand() * 3 ) + if ( up == 2 ) + up = -1 + + if ( up == 0 ) { + var left = Math.floor( Random.rand() * 3 ) + if ( left == 2 ) + left = -1 + } else + left = 0 + + if ( Random.rand() < 0.5 ) { + var swap = left + left = up + up = swap + } + + return [ left, up ] + } + + for ( var s = 0; s < turtleSteps; s++ ) { + + for ( var t = 0; t < numTurtles; t++ ) { + + var dirs = pickDirections() + var up = dirs[0] + var left = dirs[1] + + var lastTurtle = turtles[t][turtles[t].length - 1] + var nextTurtle = [ lastTurtle[0] + left, lastTurtle[1] + up ] + + if ( nextTurtle[0] >= gridSize[0] || nextTurtle[1] >= gridSize[1] || nextTurtle[0] < 0 || nextTurtle[1] < 0 ) + continue; + + if ( grid[nextTurtle[0]][nextTurtle[1]] == undefined ) { + turtles[t].push( nextTurtle ) + grid[nextTurtle[0]][nextTurtle[1]] = t; + } + + } + } + + // print( grid.toString() ) + + turtlePaths = [] + for ( var t = 0; t < numTurtles; t++ ) { + + turtlePath = [] + + var nextSeg = function(currTurtle, prevTurtle) { + + var pathX = currTurtle[0] + + if ( currTurtle[1] < prevTurtle[1] ) { + pathX = currTurtle[0] + 1 + pathY = prevTurtle[1] + } else if ( currTurtle[1] > prevTurtle[1] ) { + pathX = currTurtle[0] + pathY = currTurtle[1] + } else if ( currTurtle[0] < prevTurtle[0] ) { + pathX = prevTurtle[0] + pathY = currTurtle[1] + } else if ( currTurtle[0] > prevTurtle[0] ) { + pathX = currTurtle[0] + pathY = currTurtle[1] + 1 + } + + // print( " Prev : " + prevTurtle + " Curr : " + currTurtle + " path + // : " + // + [pathX, pathY]); + + return [ pathX, pathY ] + } + + for ( var s = 1; s < turtles[t].length; s++ ) { + + currTurtle = turtles[t][s] + prevTurtle = turtles[t][s - 1] + + turtlePath.push( nextSeg( currTurtle, prevTurtle ) ) + + } + + for ( var s = turtles[t].length - 2; s >= 0; s-- ) { + + currTurtle = turtles[t][s] + prevTurtle = turtles[t][s + 1] + + turtlePath.push( nextSeg( currTurtle, prevTurtle ) ) + + } + + // printjson( turtlePath ) + + // End of the line is not inside our polygon. 
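+        // (Hedged reading of the clearing step below: the path is traced out
+        // and back, so the tip cell where a turtle stopped lies on the
+        // polygon boundary rather than strictly inside it, and counting it
+        // as "in" would skew the expected $within totals later.)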
+ var lastTurtle = turtles[t][turtles[t].length - 1] + grid[lastTurtle[0]][lastTurtle[1]] = undefined + + fixedTurtlePath = [] + for ( var s = 1; s < turtlePath.length; s++ ) { + + if ( turtlePath[s - 1][0] == turtlePath[s][0] && turtlePath[s - 1][1] == turtlePath[s][1] ) + continue; + + var up = turtlePath[s][1] - turtlePath[s - 1][1] + var right = turtlePath[s][0] - turtlePath[s - 1][0] + var addPoint = ( up != 0 && right != 0 ) + + if ( addPoint && up != right ) { + fixedTurtlePath.push( [ turtlePath[s][0], turtlePath[s - 1][1] ] ) + } else if ( addPoint ) { + fixedTurtlePath.push( [ turtlePath[s - 1][0], turtlePath[s][1] ] ) + } + + fixedTurtlePath.push( turtlePath[s] ) + + } + + // printjson( fixedTurtlePath ) + + turtlePaths.push( fixedTurtlePath ) + + } + + // Uncomment to print polygon shape + // print( grid.toString() ) + + var polygon = [] + for ( var t = 0; t < turtlePaths.length; t++ ) { + for ( var s = 0; s < turtlePaths[t].length; s++ ) { + polygon.push( rotatePoint( turtlePaths[t][s] ) ) + } + } + + // Uncomment to print out polygon + // printjson( polygon ) + + t = db.polytest2 + t.drop() + + // Test single and multi-location documents + var pointsIn = 0 + var pointsOut = 0 + var allPointsIn = [] + var allPointsOut = [] + + for ( var j = grid[0].length - 1; j >= 0; j-- ) { + for ( var i = 0; i < grid.length; i++ ) { + + var point = rotatePoint( [ i + 0.5, j + 0.5 ] ) + + t.insert( { loc : point } ) + if ( grid[i][j] != undefined ){ + allPointsIn.push( point ) + pointsIn++ + } + else{ + allPointsOut.push( point ) + pointsOut++ + } + } + } + + t.ensureIndex( { loc : "2d" }, { bits : 1 + bits, max : bounds[1], min : bounds[0] } ) + assert.isnull( db.getLastError() ) + + t.insert( { loc : allPointsIn } ) + t.insert( { loc : allPointsOut } ) + allPoints = allPointsIn.concat( allPointsOut ) + t.insert( { loc : allPoints } ) + + print( "Points : " ) + printjson( { pointsIn : pointsIn, pointsOut : pointsOut } ) + //print( t.find( { loc : { "$within" : { "$polygon" : polygon } } } ).count() ) + + assert.eq( gridSize[0] * gridSize[1] + 3, t.find().count() ) + assert.eq( 2 + pointsIn, t.find( { loc : { "$within" : { "$polygon" : polygon } } } ).count() ); + +} diff --git a/jstests/geo_polygon3.js b/jstests/geo_polygon3.js new file mode 100644 index 0000000..9fdff1a --- /dev/null +++ b/jstests/geo_polygon3.js @@ -0,0 +1,54 @@ +// +// Tests for polygon querying with varying levels of accuracy +// + +var numTests = 31; + +for( var n = 0; n < numTests; n++ ){ + + t = db.geo_polygon3; + t.drop(); + + num = 0; + for ( x=1; x < 9; x++ ){ + for ( y= 1; y < 9; y++ ){ + o = { _id : num++ , loc : [ x , y ] }; + t.save( o ); + } + } + + t.ensureIndex( { loc : "2d" }, { bits : 2 + n } ); + + triangle = [[0,0], [1,1], [0,2]]; + + // Look at only a small slice of the data within a triangle + assert.eq( 1 , t.find( { loc: { "$within": { "$polygon" : triangle }}} ).count() , "Triangle Test" ); + + + boxBounds = [ [0,0], [0,10], [10,10], [10,0] ]; + + assert.eq( num , t.find( { loc : { "$within" : { "$polygon" : boxBounds } } } ).count() , "Bounding Box Test" ); + + // Look in a box much bigger than the one we have data in + boxBounds = [[-100,-100], [-100, 100], [100,100], [100,-100]]; + assert.eq( num , t.find( { loc : { "$within" : { "$polygon" : boxBounds } } } ).count() , "Big Bounding Box Test" ); + + t.drop(); + + pacman = [ + [0,2], [0,4], [2,6], [4,6], // Head + [6,4], [4,3], [6,2], // Mouth + [4,0], [2,0] // Bottom + ]; + + t.save({loc: [1,3] }); // Add a point that's in + t.ensureIndex( { 
loc : "2d" }, { bits : 2 + n } );
+
+    assert.eq( 1 , t.find({loc : { $within : { $polygon : pacman }}} ).count() , "Pacman single point" );
+
+    t.save({ loc : [5, 3] }) // Add a point that's out right in the mouth opening
+    t.save({ loc : [3, 7] }) // Add a point above the center of the head
+    t.save({ loc : [3,-1] }) // Add a point below the center of the bottom
+
+    assert.eq( 1 , t.find({loc : { $within : { $polygon : pacman }}} ).count() , "Pacman double point" );
+}
diff --git a/jstests/geo_regex0.js b/jstests/geo_regex0.js
new file mode 100644
index 0000000..79042b9
--- /dev/null
+++ b/jstests/geo_regex0.js
@@ -0,0 +1,18 @@
+// From SERVER-2247
+// Tests to make sure regex works with geo indices
+
+t = db.regex0
+t.drop()
+
+t.ensureIndex( { point : '2d', words : 1 } )
+t.insert( { point : [ 1, 1 ], words : [ 'foo', 'bar' ] } )
+
+regex = { words : /^f/ }
+geo = { point : { $near : [ 1, 1 ] } }
+both = { point : { $near : [ 1, 1 ] }, words : /^f/ }
+
+assert.eq(1, t.find( regex ).count() )
+assert.eq(1, t.find( geo ).count() )
+assert.eq(1, t.find( both ).count() )
+
+
diff --git a/jstests/geo_small_large.js b/jstests/geo_small_large.js
new file mode 100644
index 0000000..aff4743
--- /dev/null
+++ b/jstests/geo_small_large.js
@@ -0,0 +1,151 @@
+// SERVER-2386, general geo-indexing using very large and very small bounds
+
+load( "jstests/libs/geo_near_random.js" );
+
+// Do some random tests (for near queries) with very large and small ranges
+
+var test = new GeoNearRandomTest( "geo_small_large" );
+
+bounds = { min : -Math.pow( 2, 34 ), max : Math.pow( 2, 34 ) };
+
+test.insertPts( 50, bounds );
+
+printjson( db["geo_small_large"].find().limit( 10 ).toArray() )
+
+test.testPt( [ 0, 0 ] );
+test.testPt( test.mkPt( undefined, bounds ) );
+test.testPt( test.mkPt( undefined, bounds ) );
+test.testPt( test.mkPt( undefined, bounds ) );
+test.testPt( test.mkPt( undefined, bounds ) );
+
+test = new GeoNearRandomTest( "geo_small_large" );
+
+bounds = { min : -Math.pow( 2, -34 ), max : Math.pow( 2, -34 ) };
+
+test.insertPts( 50, bounds );
+
+printjson( db["geo_small_large"].find().limit( 10 ).toArray() )
+
+test.testPt( [ 0, 0 ] );
+test.testPt( test.mkPt( undefined, bounds ) );
+test.testPt( test.mkPt( undefined, bounds ) );
+test.testPt( test.mkPt( undefined, bounds ) );
+test.testPt( test.mkPt( undefined, bounds ) );
+
+
+// Check that our box and circle queries also work
+var scales = [ Math.pow( 2, 40 ), Math.pow( 2, -40 ), Math.pow(2, 2), Math.pow(3, -15), Math.pow(3, 15) ]
+
+for ( var i = 0; i < scales.length; i++ ) {
+
+    scale = scales[i];
+
+    var eps = Math.pow( 2, -7 ) * scale;
+    var radius = 5 * scale;
+    var max = 10 * scale;
+    var min = -max;
+    var range = max - min;
+    var bits = 2 + Math.random() * 30
+
+    var t = db["geo_small_large"]
+    t.drop();
+    t.ensureIndex( { p : "2d" }, { min : min, max : max, bits : bits })
+
+    var outPoints = 0;
+    var inPoints = 0;
+
+    printjson({ eps : eps, radius : radius, max : max, min : min, range : range, bits : bits })
+
+    // Put a point slightly inside and outside our range
+    for ( var j = 0; j < 2; j++ ) {
+        var currRad = ( j % 2 == 0 ? radius + eps : radius - eps );
+        t.insert( { p : { x : currRad, y : 0 } } );
+        print( db.getLastError() )
+    }
+
+    printjson( t.find().toArray() );
+
+    assert.eq( t.count( { p : { $within : { $center : [[0, 0], radius ] } } } ), 1, "Incorrect center points found!" )
+    assert.eq( t.count( { p : { $within : { $box : [ [ -radius, -radius ], [ radius, radius ] ] } } } ), 1,
+               "Incorrect box points found!" 
) + + shouldFind = [] + randoms = [] + + for ( var j = 0; j < 2; j++ ) { + + var randX = Math.random(); // randoms[j].randX + var randY = Math.random(); // randoms[j].randY + + randoms.push({ randX : randX, randY : randY }) + + var x = randX * ( range - eps ) + eps + min; + var y = randY * ( range - eps ) + eps + min; + + t.insert( { p : [ x, y ] } ); + + if ( x * x + y * y > radius * radius ){ + // print( "out point "); + // printjson({ x : x, y : y }) + outPoints++ + } + else{ + // print( "in point "); + // printjson({ x : x, y : y }) + inPoints++ + shouldFind.push({ x : x, y : y, radius : Math.sqrt( x * x + y * y ) }) + } + } + + /* + function printDiff( didFind, shouldFind ){ + + for( var i = 0; i < shouldFind.length; i++ ){ + var beenFound = false; + for( var j = 0; j < didFind.length && !beenFound ; j++ ){ + beenFound = shouldFind[i].x == didFind[j].x && + shouldFind[i].y == didFind[j].y + } + + if( !beenFound ){ + print( "Could not find: " ) + shouldFind[i].inRadius = ( radius - shouldFind[i].radius >= 0 ) + printjson( shouldFind[i] ) + } + } + } + + print( "Finding random pts... ") + var found = t.find( { p : { $within : { $center : [[0, 0], radius ] } } } ).toArray() + var didFind = [] + for( var f = 0; f < found.length; f++ ){ + //printjson( found[f] ) + var x = found[f].p.x != undefined ? found[f].p.x : found[f].p[0] + var y = found[f].p.y != undefined ? found[f].p.y : found[f].p[1] + didFind.push({ x : x, y : y, radius : Math.sqrt( x * x + y * y ) }) + } + + print( "Did not find but should: ") + printDiff( didFind, shouldFind ) + print( "Found but should not have: ") + printDiff( shouldFind, didFind ) + */ + + assert.eq( t.count( { p : { $within : { $center : [[0, 0], radius ] } } } ), 1 + inPoints, + "Incorrect random center points found!\n" + tojson( randoms ) ) + + print("Found " + inPoints + " points in and " + outPoints + " points out."); + + var found = t.find( { p : { $near : [0, 0], $maxDistance : radius } } ).toArray() + var dist = 0; + for( var f = 0; f < found.length; f++ ){ + var x = found[f].p.x != undefined ? found[f].p.x : found[f].p[0] + var y = found[f].p.y != undefined ? 
found[f].p.y : found[f].p[1] + print( "Dist: x : " + x + " y : " + y + " dist : " + Math.sqrt( x * x + y * y) + " radius : " + radius ) + } + + assert.eq( t.count( { p : { $near : [0, 0], $maxDistance : radius } } ), 1 + inPoints, + "Incorrect random center points found near!\n" + tojson( randoms ) ) + +} + diff --git a/jstests/geo_uniqueDocs.js b/jstests/geo_uniqueDocs.js new file mode 100644 index 0000000..b77a3b4 --- /dev/null +++ b/jstests/geo_uniqueDocs.js @@ -0,0 +1,38 @@ +// Test uniqueDocs option for $within and geoNear queries SERVER-3139 + +collName = 'geo_uniqueDocs_test' +t = db.geo_uniqueDocs_test +t.drop() + +t.save( { locs : [ [0,2], [3,4]] } ) +t.save( { locs : [ [6,8], [10,10] ] } ) + +t.ensureIndex( { locs : '2d' } ) + +// geoNear tests +assert.eq(4, db.runCommand({geoNear:collName, near:[0,0]}).results.length) +assert.eq(4, db.runCommand({geoNear:collName, near:[0,0], uniqueDocs:false}).results.length) +assert.eq(2, db.runCommand({geoNear:collName, near:[0,0], uniqueDocs:true}).results.length) +results = db.runCommand({geoNear:collName, near:[0,0], num:2}).results +assert.eq(2, results.length) +assert.eq(2, results[0].dis) +assert.eq(5, results[1].dis) +results = db.runCommand({geoNear:collName, near:[0,0], num:2, uniqueDocs:true}).results +assert.eq(2, results.length) +assert.eq(2, results[0].dis) +assert.eq(10, results[1].dis) + +// $within tests + +assert.eq(2, t.find( {locs: {$within: {$box : [[0,0],[9,9]]}}}).count()) +assert.eq(2, t.find( {locs: {$within: {$box : [[0,0],[9,9]], $uniqueDocs : true}}}).count()) +assert.eq(3, t.find( {locs: {$within: {$box : [[0,0],[9,9]], $uniqueDocs : false}}}).count()) + +assert.eq(2, t.find( {locs: {$within: {$center : [[5,5],7], $uniqueDocs : true}}}).count()) +assert.eq(3, t.find( {locs: {$within: {$center : [[5,5],7], $uniqueDocs : false}}}).count()) + +assert.eq(2, t.find( {locs: {$within: {$centerSphere : [[5,5],1], $uniqueDocs : true}}}).count()) +assert.eq(4, t.find( {locs: {$within: {$centerSphere : [[5,5],1], $uniqueDocs : false}}}).count()) + +assert.eq(2, t.find( {locs: {$within: {$polygon : [[0,0],[0,9],[9,9]], $uniqueDocs : true}}}).count()) +assert.eq(3, t.find( {locs: {$within: {$polygon : [[0,0],[0,9],[9,9]], $uniqueDocs : false}}}).count()) diff --git a/jstests/getlog1.js b/jstests/getlog1.js new file mode 100644 index 0000000..75fbeab --- /dev/null +++ b/jstests/getlog1.js @@ -0,0 +1,24 @@ +// to run: +// ./mongo jstests/ + +contains = function(arr,obj) { + var i = arr.length; + while (i--) { + if (arr[i] === obj) { + return true; + } + } + return false; +} + +var resp = db.adminCommand({getLog:"*"}) +assert( resp.ok == 1, "error executing getLog command" ); +assert( resp.names, "no names field" ); +assert( resp.names.length > 0, "names array is empty" ); +assert( contains(resp.names,"global") , "missing global category" ); +assert( !contains(resp.names,"butty") , "missing butty category" ); + +resp = db.adminCommand({getLog:"global"}) +assert( resp.ok == 1, "error executing getLog command" ); +assert( resp.log, "no log field" ); +assert( resp.log.length > 0 , "no log lines" ); diff --git a/jstests/group7.js b/jstests/group7.js new file mode 100644 index 0000000..5bf9232 --- /dev/null +++ b/jstests/group7.js @@ -0,0 +1,43 @@ +// Test yielding group command SERVER-1395 + +t = db.jstests_group7; +t.drop(); + +function checkForYield( docs, updates ) { + t.drop(); + a = 0; + for( var i = 0; i < docs; ++i ) { + t.save( {a:a} ); + } + db.getLastError(); + + // Iteratively update all a values atomically. 
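+    // (The parallel shell below issues 'updates' full-collection multi-updates
+    // under $atomic while group() scans; if group ever yields mid-scan it can
+    // observe two different 'a' values in one result set, which is exactly
+    // the signal this function reports.)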
+ p = startParallelShell( 'for( a = 0; a < ' + updates + '; ++a ) { db.jstests_group7.update( {$atomic:true}, {$set:{a:a}}, false, true ); db.getLastError(); }' ); + + for( var i = 0; i < updates; ++i ) { + ret = t.group({key:{a:1},reduce:function(){},initial:{}}); + // Check if group sees more than one a value, indicating that it yielded. + if ( ret.length > 1 ) { + p(); + return true; + } + printjson( ret ); + } + + p(); + return false; +} + +var yielded = false; +var docs = 1500; +var updates = 50; +for( var j = 1; j <= 6; ++j ) { + if ( checkForYield( docs, updates ) ) { + yielded = true; + break; + } + // Increase docs and updates to encourage yielding. + docs *= 2; + updates *= 2; +} +assert( yielded ); \ No newline at end of file diff --git a/jstests/hint1.js b/jstests/hint1.js index 63a5fa6..b5a580f 100644 --- a/jstests/hint1.js +++ b/jstests/hint1.js @@ -5,6 +5,12 @@ p.drop(); p.save( { ts: new Date( 1 ), cls: "entry", verticals: "alleyinsider", live: true } ); p.ensureIndex( { ts: 1 } ); -e = p.find( { live: true, ts: { $lt: new Date( 1234119308272 ) }, cls: "entry", verticals: " alleyinsider" } ).sort( { ts: -1 } ).hint( { ts: 1 } ).explain(); -assert.eq( e.indexBounds.ts[0][0].getTime(), new Date( 1234119308272 ).getTime() , "A" ); -assert.eq( 0 , e.indexBounds.ts[0][1].getTime() , "B" ); +e = p.find( { live: true, ts: { $lt: new Date( 1234119308272 ) }, cls: "entry", verticals: "alleyinsider" } ).sort( { ts: -1 } ).hint( { ts: 1 } ).explain(); +assert.eq(e.indexBounds.ts[0][0].getTime(), new Date(1234119308272).getTime(), "A"); + +//printjson(e); + +assert.eq( /*just below min date is bool true*/true, e.indexBounds.ts[0][1], "B"); + +assert.eq(1, p.find({ live: true, ts: { $lt: new Date(1234119308272) }, cls: "entry", verticals: "alleyinsider" }).sort({ ts: -1 }).hint({ ts: 1 }).count()); + diff --git a/jstests/idhack.js b/jstests/idhack.js new file mode 100644 index 0000000..9614ebc --- /dev/null +++ b/jstests/idhack.js @@ -0,0 +1,23 @@ + +t = db.idhack +t.drop() + + +t.insert( { _id : { x : 1 } , z : 1 } ) +t.insert( { _id : { x : 2 } , z : 2 } ) +t.insert( { _id : { x : 3 } , z : 3 } ) +t.insert( { _id : 1 , z : 4 } ) +t.insert( { _id : 2 , z : 5 } ) +t.insert( { _id : 3 , z : 6 } ) + +assert.eq( 2 , t.findOne( { _id : { x : 2 } } ).z , "A1" ) +assert.eq( 2 , t.find( { _id : { $gte : 2 } } ).count() , "A2" ) +assert.eq( 2 , t.find( { _id : { $gte : 2 } } ).itcount() , "A3" ) + +t.update( { _id : { x : 2 } } , { $set : { z : 7 } } ) +assert.eq( 7 , t.findOne( { _id : { x : 2 } } ).z , "B1" ) + +t.update( { _id : { $gte : 2 } } , { $set : { z : 8 } } , false , true ) +assert.eq( 4 , t.findOne( { _id : 1 } ).z , "C1" ) +assert.eq( 8 , t.findOne( { _id : 2 } ).z , "C2" ) +assert.eq( 8 , t.findOne( { _id : 3 } ).z , "C3" ) diff --git a/jstests/in8.js b/jstests/in8.js new file mode 100644 index 0000000..5e7e587 --- /dev/null +++ b/jstests/in8.js @@ -0,0 +1,23 @@ +// SERVER-2829 Test arrays matching themselves within a $in expression. 
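+// (Illustrative reading, not an extension of the test matrix: $in matches a
+// value equal to the whole array as well as to any element, so after
+//     t.save( {key: [1]} )
+// the query
+//     t.count( {key: {$in: [[1]]}} )
+// is expected to return 1, both unindexed and with the {key:1} index that
+// doTest() exercises below.)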
+ +t = db.jstests_in8; +t.drop(); + +t.save( {key: [1]} ); +t.save( {key: ['1']} ); +t.save( {key: [[2]]} ); + +function doTest() { + assert.eq( 1, t.count( {key:[1]} ) ); + assert.eq( 1, t.count( {key:{$in:[[1]]}} ) ); + assert.eq( 1, t.count( {key:{$in:[[1]],$ne:[2]}} ) ); + assert.eq( 1, t.count( {key:{$in:[['1']],$type:2}} ) ); + assert.eq( 1, t.count( {key:['1']} ) ); + assert.eq( 1, t.count( {key:{$in:[['1']]}} ) ); + assert.eq( 1, t.count( {key:[2]} ) ); + assert.eq( 1, t.count( {key:{$in:[[2]]}} ) ); +} + +doTest(); +t.ensureIndex( {key:1} ); +doTest(); diff --git a/jstests/in9.js b/jstests/in9.js new file mode 100644 index 0000000..34cefb8 --- /dev/null +++ b/jstests/in9.js @@ -0,0 +1,35 @@ +// SERVER-2343 Test $in empty array matching. + +t = db.jstests_in9; +t.drop(); + +function someData() { + t.remove(); + t.save( {key: []} ); +} + +function moreData() { + someData(); + t.save( {key: [1]} ); + t.save( {key: ['1']} ); + t.save( {key: null} ); + t.save( {} ); +} + +function check() { + assert.eq( 1, t.count( {key:[]} ) ); + assert.eq( 1, t.count( {key:{$in:[[]]}} ) ); +} + +function doTest() { + someData(); + check(); + moreData(); + check(); +} + +doTest(); + +// SERVER-1943 not fixed yet +t.ensureIndex( {key:1} ); +doTest(); diff --git a/jstests/ina.js b/jstests/ina.js new file mode 100644 index 0000000..cf614ab --- /dev/null +++ b/jstests/ina.js @@ -0,0 +1,15 @@ +// Uassert when $elemMatch is attempted within $in SERVER-3545 + +t = db.jstests_ina; +t.drop(); +t.save( {} ); + +assert.throws( function() { t.find( {a:{$in:[{$elemMatch:{b:1}}]}} ).itcount(); } ); +assert.throws( function() { t.find( {a:{$not:{$in:[{$elemMatch:{b:1}}]}}} ).itcount(); } ); + +assert.throws( function() { t.find( {a:{$nin:[{$elemMatch:{b:1}}]}} ).itcount(); } ); +assert.throws( function() { t.find( {a:{$not:{$nin:[{$elemMatch:{b:1}}]}}} ).itcount(); } ); + +// NOTE Above we don't check cases like {b:2,$elemMatch:{b:3,4}} - generally +// we assume that the first key is $elemMatch if any key is, and validating +// every key is expensive in some cases. 
\ No newline at end of file diff --git a/jstests/index11.js b/jstests/index11.js index 2a552dd..0f6aa33 100644 --- a/jstests/index11.js +++ b/jstests/index11.js @@ -1,13 +1,29 @@ // Reindex w/ field too large to index coll = db.jstests_index11; -coll.drop(); +coll.drop(); + +var str = "xxxxxxxxxxxxxxxx"; +str = str + str; +str = str + str; +str = str + str; +str = str + str; +str = str + str; +str = str + str; +str = str + str; +str = str + str; +str = str + 'q'; + +coll.insert({ k: 'a', v: str }); + +assert.eq(0, coll.find({ "k": "x" }).count(), "expected zero keys 1"); -coll.ensureIndex({"k": 1, "v": 1}); -coll.insert({k: "x", v: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"}); -assert.eq(0, coll.find({"k": "x"}).count()); // SERVER-1716 +coll.ensureIndex({"k": 1, "v": 1}); +coll.insert({ k: "x", v: str }); -coll.dropIndexes(); -coll.ensureIndex({"k": 1, "v": 1}); +assert.eq(0, coll.find({"k": "x"}).count(), "B"); // SERVER-1716 -assert.eq(0, coll.find({"k": "x"}).count()); +coll.dropIndexes(); +coll.ensureIndex({"k": 1, "v": 1}); + +assert.eq(0, coll.find({ "k": "x" }).count(), "expected zero keys 2"); diff --git a/jstests/index9.js b/jstests/index9.js index c832783..04b9009 100644 --- a/jstests/index9.js +++ b/jstests/index9.js @@ -1,7 +1,15 @@ t = db.jstests_index9; +t.drop(); +db.createCollection( "jstests_index9" ); +assert.eq( 1, db.system.indexes.count( {ns: "test.jstests_index9"} ), "There should be 1 index with default collection" ); +t.drop(); +db.createCollection( "jstests_index9", {autoIndexId: true} ); +assert.eq( 1, db.system.indexes.count( {ns: "test.jstests_index9"} ), "There should be 1 index if autoIndexId: true" ); + t.drop(); db.createCollection( "jstests_index9", {autoIndexId:false} ); +assert.eq( 0, db.system.indexes.count( {ns: "test.jstests_index9"} ), "There should be 0 index if autoIndexId: false" ); t.createIndex( { _id:1 } ); assert.eq( 1, db.system.indexes.count( {ns: "test.jstests_index9"} ) ); t.createIndex( { _id:1 } ); diff --git a/jstests/index_big1.js b/jstests/index_big1.js new file mode 100644 index 0000000..61260a3 --- /dev/null +++ b/jstests/index_big1.js @@ -0,0 +1,39 @@ +// check where "key to big" happens + +t = db.index_big1; + +N = 3200; 
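+
+// (Context, hedged: a "too big" key is one whose BSON representation exceeds
+// the per-key limit; index_maxkey.js later in this patch pins the observed
+// maximum at 821 bytes for v0 indexes and 1026 bytes for v1.)
+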
+t.drop(); + +var s = ""; + +for ( i=0; i= 1; i--) { + t.insert({ _id: i, k: keys[i] }); + } + } +} + +var expect = null; + +var ok = true; + +function check() { + assert(t.validate().valid); + + var c = t.find({ k: /^a/ }).count(); + + print("keycount:" + c); + + if (expect) { + if (expect != c) { + print("count of keys doesn't match expected count of : " + expect + " got: " + c); + ok = false; + } + } + else { + expect = c; + } + + //print(t.validate().result); +} + +for (var pass = 1; pass <= 2; pass++) { + print("pass:" + pass); + + t.drop(); + t.ensureIndex({ k: 1 }); + go(); + check(); // check incremental addition + + t.reIndex(); + check(); // check bottom up + + t.drop(); + go(); + t.ensureIndex({ k: 1 }); + check(); // check bottom up again without reindex explicitly + + t.drop(); + go(); + t.ensureIndex({ k: 1 }, { background: true }); + check(); // check background (which should be incremental) + + dir = -1; +} + +assert(ok,"not ok"); diff --git a/jstests/index_check5.js b/jstests/index_check5.js index 90ac301..eabb929 100644 --- a/jstests/index_check5.js +++ b/jstests/index_check5.js @@ -14,4 +14,4 @@ t.save( { "name" : "Player2" , assert.eq( 2 , t.find( { "scores.level": 2, "scores.score": {$gt:30} } ).itcount() , "A" ); t.ensureIndex( { "scores.level" : 1 , "scores.score" : 1 } ); -assert.eq( 1 , t.find( { "scores.level": 2, "scores.score": {$gt:30} } ).itcount() , "B" ); +assert.eq( 2 , t.find( { "scores.level": 2, "scores.score": {$gt:30} } ).itcount() , "B" ); diff --git a/jstests/index_check8.js b/jstests/index_check8.js index bc267df..1964ecb 100644 --- a/jstests/index_check8.js +++ b/jstests/index_check8.js @@ -4,12 +4,18 @@ t.drop(); t.insert( { a : 1 , b : 1 , c : 1 , d : 1 , e : 1 } ) t.ensureIndex( { a : 1 , b : 1 , c : 1 } ) -t.ensureIndex( { a : 1 , b : 1 , d : 1 , e : 1 } ) +t.ensureIndex({ a: 1, b: 1, d: 1, e: 1 }) + +// this block could be added to many tests in theory... +if ((new Date()) % 10 == 0) { + var coll = t.toString().substring(db.toString().length + 1); + print("compacting " + coll + " before continuing testing"); + // don't check return code - false for mongos + print("ok: " + db.runCommand({ compact: coll, dev: true })); +} x = t.find( { a : 1 , b : 1 , d : 1 } ).sort( { e : 1 } ).explain() assert( ! x.scanAndOrder , "A : " + tojson( x ) ) x = t.find( { a : 1 , b : 1 , c : 1 , d : 1 } ).sort( { e : 1 } ).explain() //assert( ! 
x.scanAndOrder , "B : " + tojson( x ) ) - - diff --git a/jstests/index_fornew.js b/jstests/index_fornew.js deleted file mode 100644 index 6c3c158..0000000 --- a/jstests/index_fornew.js +++ /dev/null @@ -1,13 +0,0 @@ - -t = db.index_fornew; -t.drop(); - -t.insert( { x : 1 } ) -t.ensureIndex( { x : 1 } , { v : 1 } ) -assert.eq( 1 , t.getIndexes()[1].v , tojson( t.getIndexes() ) ); - -assert.throws( function(){ t.findOne( { x : 1 } ); } ) - -t.reIndex(); -assert.eq( 0 , t.getIndexes()[1].v , tojson( t.getIndexes() ) ); -assert( t.findOne( { x : 1 } ) ); diff --git a/jstests/index_maxkey.js b/jstests/index_maxkey.js new file mode 100644 index 0000000..eba8126 --- /dev/null +++ b/jstests/index_maxkey.js @@ -0,0 +1,27 @@ + +t = db.index_maxkey; + +for ( var indexVersion=0; indexVersion<=1; indexVersion++ ) { + t.drop(); + + s = ""; + + t.ensureIndex( { s : 1 } , { v : indexVersion } ); + while ( true ) { + t.insert( { s : s } ); + if ( t.find().count() == t.find().sort( { s : 1 } ).itcount() ) { + s += "....."; + continue; + } + var sz = Object.bsonsize( { s : s } ) - 2; + print( "indexVersion: " + indexVersion + " max key is : " + sz ); + if ( indexVersion == 0 ) { + assert.eq( 821 , sz ); + } + else if ( indexVersion == 1 ) { + assert.eq( 1026 , sz ); + } + break; + } + +} diff --git a/jstests/indexbindata.js b/jstests/indexbindata.js new file mode 100755 index 0000000..e69de29 diff --git a/jstests/indexk.js b/jstests/indexk.js new file mode 100644 index 0000000..7cef95a --- /dev/null +++ b/jstests/indexk.js @@ -0,0 +1,58 @@ +// Check correct result set when bounds each match different multikeys SERVER-958 + +t = db.jstests_indexk; +t.drop(); + +t.insert({a:[1,10]}); + +assert.eq( 1, t.count({a: {$gt:2, $lt:5}}) ); +assert.eq( 1, t.count({a: {$gt:2}}) ); +assert.eq( 1, t.count({a: {$lt:5}}) ); + +assert.eq( 1, t.count({a: {$gt:5, $lt:2}}) ); +assert.eq( 1, t.count({a: {$gt:5}}) ); +assert.eq( 1, t.count({a: {$lt:2}}) ); + +t.ensureIndex({a:1}); + +// Check that only one constraint limits the index range for a multikey index. +// The constraint used is arbitrary, but testing current behavior here. + +assert.eq( 1, t.count({a: {$gt: 2, $lt:5}}) ); +e = t.find({a: {$gt: 2, $lt:5}}).explain(); +assert.eq( 1, e.nscanned ); +assert.eq( 1, e.n ); +assert.eq( 2, e.indexBounds.a[ 0 ][ 0 ] ); +// Check that upper bound is large ( > 5 ). +assert.lt( 1000, e.indexBounds.a[ 0 ][ 1 ] ); + +assert.eq( 1, t.count({a: {$lt: 5, $gt:2}}) ); +e = t.find({a: {$lt: 5, $gt:2}}).explain(); +assert.eq( 1, e.nscanned ); +assert.eq( 1, e.n ); +// Check that upper bound is low ( < 2 ). +assert.gt( -1000, e.indexBounds.a[ 0 ][ 0 ] ); +assert.eq( 5, e.indexBounds.a[ 0 ][ 1 ] ); + +// Now check cases where no match is possible with a single key index. + +assert.eq( 1, t.count({a: {$gt: 5, $lt:2}}) ); +e = t.find({a: {$gt: 5, $lt:2}}).explain(); +assert.eq( 1, e.nscanned ); +assert.eq( 1, e.n ); +assert.eq( 5, e.indexBounds.a[ 0 ][ 0 ] ); +// Check that upper bound is low ( < 2 ). +assert.lt( 1000, e.indexBounds.a[ 0 ][ 1 ] ); + +assert.eq( 1, t.count({a: {$lt: 2, $gt:5}}) ); +e = t.find({a: {$lt: 2, $gt:5}}).explain(); +assert.eq( 1, e.nscanned ); +assert.eq( 1, e.n ); +// Check that upper bound is large ( > 5 ). 
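+// (Clarifying the two asserts that follow, since only one constraint is
+// honored: for {a: {$lt: 2, $gt: 5}} the $lt:2 term supplies the upper
+// bound, so a[0][1] is 2, while the lower bound a[0][0] is left open and
+// therefore compares below -1000.)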
+assert.gt( -1000, e.indexBounds.a[ 0 ][ 0 ] ); +assert.eq( 2, e.indexBounds.a[ 0 ][ 1 ] ); + +assert.eq( 1, t.count({a: {$gt: 2}}) ); +assert.eq( 1, t.count({a: {$lt: 5}}) ); + +// Check good performance of single key index \ No newline at end of file diff --git a/jstests/indexl.js b/jstests/indexl.js new file mode 100644 index 0000000..666586d --- /dev/null +++ b/jstests/indexl.js @@ -0,0 +1,27 @@ +// Check nonoverlapping $in/$all with multikeys SERVER-2165 + +t = db.jstests_indexl; + +function test(t) { + t.save( {a:[1,2]} ); + assert.eq( 1, t.count( {a:{$all:[1],$in:[2]}} ) ); + assert.eq( 1, t.count( {a:{$all:[2],$in:[1]}} ) ); + assert.eq( 1, t.count( {a:{$in:[2],$all:[1]}} ) ); + assert.eq( 1, t.count( {a:{$in:[1],$all:[2]}} ) ); + assert.eq( 1, t.count( {a:{$all:[1],$in:[2]}} ) ); + t.save({a:[3,4]}) + t.save({a:[2,3]}) + t.save({a:[1,2,3,4]}) + assert.eq( 2, t.count( {a:{$in:[2],$all:[1]}} ) ); + assert.eq( 1, t.count( {a:{$in:[3],$all:[1,2]}} ) ); + assert.eq( 1, t.count( {a:{$in:[1],$all:[3]}} ) ); + assert.eq( 2, t.count( {a:{$in:[2,3],$all:[1]}} ) ); + assert.eq( 1, t.count( {a:{$in:[4],$all:[2,3]}} ) ); + assert.eq( 3, t.count( {a:{$in:[1,3],$all:[2]}} ) ); +} + +t.drop(); +test(t); +t.drop(); +t.ensureIndex( {a:1} ); +test(t); \ No newline at end of file diff --git a/jstests/indexm.js b/jstests/indexm.js new file mode 100644 index 0000000..6b31ea6 --- /dev/null +++ b/jstests/indexm.js @@ -0,0 +1,38 @@ +// Check proper range combinations with or clauses overlapping non or portion of query SERVER-2302 + +t = db.jstests_indexm; +t.drop(); + +t.save( { a : [ { x : 1 } , { x : 2 } , { x : 3 } , { x : 4 } ] } ) + +function test(){ + assert.eq( 1, t.count( + { + a : { x : 1 } , + "$or" : [ { a : { x : 2 } } , { a : { x : 3 } } ] + } + ) ); +} + +// The first find will return a result since there isn't an index. +test(); + +// Now create an index. +t.ensureIndex({"a":1}); +test(); +// SERVER-3105 +//assert( !t.find( +// { +// a : { x : 1 } , +// "$or" : [ { a : { x : 2 } } , { a : { x : 3 } } ] +// } +// ).explain().clauses ); + +// Now create a different index. +t.dropIndexes(); +t.ensureIndex({"a.x":1}); +test(); + +// Drop the indexes. +t.dropIndexes(); +test(); \ No newline at end of file diff --git a/jstests/indexn.js b/jstests/indexn.js new file mode 100644 index 0000000..d5800e4 --- /dev/null +++ b/jstests/indexn.js @@ -0,0 +1,41 @@ +// Check fast detection of empty result set with a single key index SERVER-958. + +t = db.jstests_indexn; +t.drop(); + +function checkImpossibleMatchDummyCursor( explain ) { + assert.eq( 'BasicCursor', explain.cursor ); + assert.eq( 0, explain.nscanned ); + assert.eq( 0, explain.n ); +} + +t.save( {a:1,b:[1,2]} ); + +t.ensureIndex( {a:1} ); +t.ensureIndex( {b:1} ); + +assert.eq( 0, t.count( {a:{$gt:5,$lt:0}} ) ); +// {a:1} is a single key index, so no matches are possible for this query +checkImpossibleMatchDummyCursor( t.find( {a:{$gt:5,$lt:0}} ).explain() ); + +assert.eq( 0, t.count( {a:{$gt:5,$lt:0},b:2} ) ); +checkImpossibleMatchDummyCursor( t.find( {a:{$gt:5,$lt:0},b:2} ).explain() ); + +assert.eq( 0, t.count( {a:{$gt:5,$lt:0},b:{$gt:0,$lt:5}} ) ); +checkImpossibleMatchDummyCursor( t.find( {a:{$gt:5,$lt:0},b:{$gt:0,$lt:5}} ).explain() ); + +assert.eq( 1, t.count( {$or:[{a:{$gt:5,$lt:0}},{a:1}]} ) ); +checkImpossibleMatchDummyCursor( t.find( {$or:[{a:{$gt:5,$lt:0}},{a:1}]} ).explain().clauses[ 0 ] ); + +// A following invalid range is eliminated. 
+assert.eq( 1, t.count( {$or:[{a:1},{a:{$gt:5,$lt:0}}]} ) ); +assert.eq( null, t.find( {$or:[{a:1},{a:{$gt:5,$lt:0}}]} ).explain().clauses ); + +t.save( {a:2} ); + +// An intermediate invalid range is eliminated. +assert.eq( 2, t.count( {$or:[{a:1},{a:{$gt:5,$lt:0}},{a:2}]} ) ); +explain = t.find( {$or:[{a:1},{a:{$gt:5,$lt:0}},{a:2}]} ).explain(); +assert.eq( 2, explain.clauses.length ); +assert.eq( [[1,1]], explain.clauses[ 0 ].indexBounds.a ); +assert.eq( [[2,2]], explain.clauses[ 1 ].indexBounds.a ); diff --git a/jstests/indexo.js b/jstests/indexo.js new file mode 100644 index 0000000..e50c099 --- /dev/null +++ b/jstests/indexo.js @@ -0,0 +1,32 @@ +// Check that dummy basic cursors work correctly SERVER-958. + +t = db.jstests_indexo; +t.drop(); + +function checkDummyCursor( explain ) { + assert.eq( "BasicCursor", explain.cursor ); + assert.eq( 0, explain.nscanned ); + assert.eq( 0, explain.n ); +} + +t.save( {a:1} ); + +t.ensureIndex( {a:1} ); + +// Match is impossible, so no documents should be scanned. +checkDummyCursor( t.find( {a:{$gt:5,$lt:0}} ).explain() ); + +t.drop(); +checkDummyCursor( t.find( {a:1} ).explain() ); + +t.save( {a:1} ); +t.ensureIndex( {a:1} ); +checkDummyCursor( t.find( {$or:[{a:{$gt:5,$lt:0}},{a:1}]} ).explain().clauses[ 0 ] ); + +t.drop(); +t.save( {a:5,b:[1,2]} ); +t.ensureIndex( {a:1,b:1} ); +t.ensureIndex( {a:1} ); +// The first clause will use index {a:1,b:1} with the current implementation. +// The second clause has no valid values for index {a:1} so it will use a dummy cursor. +checkDummyCursor( t.find( {$or:[{b:{$exists:true},a:{$gt:4}},{a:{$lt:6,$gt:4}}]} ).explain().clauses[ 1 ] ); diff --git a/jstests/indexp.js b/jstests/indexp.js new file mode 100644 index 0000000..ee511eb --- /dev/null +++ b/jstests/indexp.js @@ -0,0 +1,58 @@ +// Check recording and playback of good query plans with different index types SERVER-958. + +t = db.jstests_indexp; +t.drop(); + +function expectRecordedPlan( query, idx ) { + assert.eq( "BtreeCursor " + idx, t.find( query ).explain( true ).oldPlan.cursor ); +} + +function expectNoRecordedPlan( query ) { + assert.isnull( t.find( query ).explain( true ).oldPlan ); +} + +// Basic test +t.drop(); +t.ensureIndex( {a:1} ); +t.save( {a:1} ); +t.find( {a:1} ).itcount(); +expectRecordedPlan( {a:1}, "a_1" ); + +// Index type changes +t.drop(); +t.ensureIndex( {a:1} ); +t.save( {a:1} ); +t.find( {a:1} ).itcount(); +t.save( {a:[1,2]} ); +expectRecordedPlan( {a:1}, "a_1" ); + +// Multi key QueryPattern reuses index +t.drop(); +t.ensureIndex( {a:1} ); +t.save( {a:[1,2]} ); +t.find( {a:{$gt:0}} ).itcount(); +expectRecordedPlan( {a:{$gt:0,$lt:5}}, "a_1" ); + +// Single key QueryPattern can still be used to find best plan - at least for now. +t.drop(); +t.ensureIndex( {a:1} ); +t.save( {a:1} ); +t.find( {a:{$gt:0,$lt:5}} ).itcount(); +t.save( {a:[1,2]} ); +expectRecordedPlan( {a:{$gt:0,$lt:5}}, "a_1" ); + +// Invalid query with only valid fields used +if ( 0 ) { // SERVER-2864 +t.drop(); +t.ensureIndex( {a:1} ); +t.save( {a:1} ); +t.find( {a:1,b:{$gt:5,$lt:0}} ).itcount(); +expectRecordedPlan( {a:{$gt:0,$lt:5}}, "a_1" ); +} + +// Dummy query plan not stored +t.drop(); +t.ensureIndex( {a:1} ); +t.save( {a:1} ); +t.find( {a:{$gt:5,$lt:0}} ).itcount(); +expectNoRecordedPlan( {a:{$gt:5,$lt:0}} ); \ No newline at end of file diff --git a/jstests/indexq.js b/jstests/indexq.js new file mode 100644 index 0000000..f067b3c --- /dev/null +++ b/jstests/indexq.js @@ -0,0 +1,14 @@ +// Test multikey range preference for a fully included range SERVER-958. 
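+// (Worked expectation, grounded in the asserts below: for
+//     {a: {$gt: 4, $gte: 5}}
+// the tighter bound wins, so explain() reports index bounds starting at 5;
+// intersecting {$in:[1,2,3]} with the $or clause's {$in:[1,2]} keeps only
+// the shared values, hence bounds [[1,1],[2,2]].)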
+ +t = db.jstests_indexq; +t.drop(); + +t.ensureIndex( {a:1} ); +// Single key index +assert.eq( 5, t.find( {a:{$gt:4,$gte:5}} ).explain().indexBounds.a[ 0 ][ 0 ] ); +assert.eq( [[1,1],[2,2]], t.find( {a:{$in:[1,2,3]},$or:[{a:{$in:[1,2]}}]} ).explain().indexBounds.a ); + +t.save( {a:[1,3]} ); +// Now with multi key index. +assert.eq( 5, t.find( {a:{$gt:4,$gte:5}} ).explain().indexBounds.a[ 0 ][ 0 ] ); +assert.eq( [[1,1],[2,2]], t.find( {a:{$in:[1,2,3]},$or:[{a:{$in:[1,2]}}]} ).explain().indexBounds.a ); diff --git a/jstests/indexr.js b/jstests/indexr.js new file mode 100644 index 0000000..60ecfb1 --- /dev/null +++ b/jstests/indexr.js @@ -0,0 +1,47 @@ +// Check multikey index cases with parallel nested fields SERVER-958. + +t = db.jstests_indexr; +t.drop(); + +// Check without indexes. +t.save( { a: [ { b: 3, c: 6 }, { b: 1, c: 1 } ] } ); +assert.eq( 1, t.count( { 'a.b':{ $gt:2 }, 'a.c': { $lt:4 } } ) ); +assert.eq( 1, t.count( { a:{ b:3, c:6 }, 'a.c': { $lt:4 } } ) ); + +// Check with single key indexes. +t.remove(); +t.ensureIndex( {'a.b':1,'a.c':1} ); +t.ensureIndex( {a:1,'a.c':1} ); +assert.eq( 0, t.count( { 'a.b':{ $gt:2 }, 'a.c': { $lt:4 } } ) ); +assert.eq( 0, t.count( { a:{ b:3, c:6 }, 'a.c': { $lt:4 } } ) ); +assert.eq( 4, t.find( { 'a.b':{ $gt:2 }, 'a.c': { $lt:4 } } ).explain().indexBounds['a.c'][0][1] ); +assert.eq( 4, t.find( { a:{ b:3, c:6 }, 'a.c': { $lt:4 } } ).explain().indexBounds['a.c'][0][1] ); + +t.save( { a: { b: 3, c: 3 } } ); +assert.eq( 1, t.count( { 'a.b':{ $gt:2 }, 'a.c': { $lt:4 } } ) ); +assert.eq( 1, t.count( { a:{ b:3, c:3 }, 'a.c': { $lt:4 } } ) ); +assert.eq( 4, t.find( { 'a.b':{ $gt:2 }, 'a.c': { $lt:4 } } ).explain().indexBounds['a.c'][0][1] ); +assert.eq( 4, t.find( { a:{ b:3, c:3 }, 'a.c': { $lt:4 } } ).explain().indexBounds['a.c'][0][1] ); + +// Check with multikey indexes. +t.remove(); +t.save( { a: [ { b: 3, c: 6 }, { b: 1, c: 1 } ] } ); + +assert.eq( 1, t.count( { 'a.b':{ $gt:2 }, 'a.c': { $lt:4 } } ) ); +assert.eq( 1, t.count( { a:{ b:3, c:6 }, 'a.c': { $lt:4 } } ) ); +assert.eq( [[{$minElement:1},{$maxElement:1}]], t.find( { 'a.b':{ $gt:2 }, 'a.c': { $lt:4 } } ).explain().indexBounds['a.c'] ); +assert.eq( [[{$minElement:1},{$maxElement:1}]], t.find( { a:{ b:3, c:6 }, 'a.c': { $lt:4 } } ).explain().indexBounds['a.c'] ); + +// Check reverse direction. +assert.eq( 1, t.find( { 'a.b':{ $gt:2 }, 'a.c': { $lt:4 } } ).sort( {'a.b':-1} ).itcount() ); +assert.eq( 1, t.find( { a:{ b:3, c:6 }, 'a.c': { $lt:4 } } ).sort( {a:-1} ).itcount() ); + +assert.eq( [[{$maxElement:1},{$minElement:1}]], t.find( { 'a.b':{ $gt:2 }, 'a.c': { $lt:4 } } ).sort( {'a.b':-1} ).explain().indexBounds['a.c'] ); +assert.eq( [[{$maxElement:1},{$minElement:1}]], t.find( { a:{ b:3, c:6 }, 'a.c': { $lt:4 } } ).sort( {a:-1} ).explain().indexBounds['a.c'] ); + +// Check second field is constrained if first is not. +assert.eq( 1, t.find( { 'a.c': { $lt:4 } } ).hint( {'a.b':1,'a.c':1} ).itcount() ); +assert.eq( 1, t.find( { 'a.c': { $lt:4 } } ).hint( {a:1,'a.c':1} ).itcount() ); + +assert.eq( 4, t.find( { 'a.c': { $lt:4 } } ).hint( {'a.b':1,'a.c':1} ).explain().indexBounds['a.c'][0][1] ); +assert.eq( 4, t.find( { 'a.c': { $lt:4 } } ).hint( {a:1,'a.c':1} ).explain().indexBounds['a.c'][0][1] ); diff --git a/jstests/indexs.js b/jstests/indexs.js new file mode 100644 index 0000000..609f912 --- /dev/null +++ b/jstests/indexs.js @@ -0,0 +1,21 @@ +// Test index key generation issue with parent and nested fields in same index and array containing subobject SERVER-3005. 
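+// Rough background (an assumption, per SERVER-3005): for a doc {a:[{b:3}]}
+// and the compound index {a:1,'a.b':1}, key generation must pair each array
+// element with its own 'a.b' value, so the bounds and nscanned should match
+// the plain subobject case exercised first below:
+//   t.find( {a:{b:3}} ).explain().nscanned  // -> 1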
+ +t = db.jstests_indexs; + +t.drop(); +t.ensureIndex( {a:1} ); +t.save( { a: [ { b: 3 } ] } ); +assert.eq( 1, t.count( { a:{ b:3 } } ) ); + +t.drop(); +t.ensureIndex( {a:1,'a.b':1} ); +t.save( { a: { b: 3 } } ); +assert.eq( 1, t.count( { a:{ b:3 } } ) ); +ib = t.find( { a:{ b:3 } } ).explain().indexBounds; + +t.drop(); +t.ensureIndex( {a:1,'a.b':1} ); +t.save( { a: [ { b: 3 } ] } ); +assert.eq( ib, t.find( { a:{ b:3 } } ).explain().indexBounds ); +assert.eq( 1, t.find( { a:{ b:3 } } ).explain().nscanned ); +assert.eq( 1, t.count( { a:{ b:3 } } ) ); diff --git a/jstests/indext.js b/jstests/indext.js new file mode 100644 index 0000000..e418dc2 --- /dev/null +++ b/jstests/indext.js @@ -0,0 +1,21 @@ +// Sparse indexes with arrays SERVER-3216 + +t = db.jstests_indext; +t.drop(); + +t.ensureIndex( {'a.b':1}, {sparse:true} ); +t.save( {a:[]} ); +t.save( {a:1} ); +assert.eq( 0, t.find().hint( {'a.b':1} ).itcount() ); +assert.eq( 0, t.find().hint( {'a.b':1} ).explain().nscanned ); + +t.ensureIndex( {'a.b':1,'a.c':1}, {sparse:true} ); +t.save( {a:[]} ); +t.save( {a:1} ); +assert.eq( 0, t.find().hint( {'a.b':1,'a.c':1} ).itcount() ); +assert.eq( 0, t.find().hint( {'a.b':1,'a.c':1} ).explain().nscanned ); + +t.save( {a:[{b:1}]} ); +t.save( {a:1} ); +assert.eq( 1, t.find().hint( {'a.b':1,'a.c':1} ).itcount() ); +assert.eq( 1, t.find().hint( {'a.b':1,'a.c':1} ).explain().nscanned ); diff --git a/jstests/indexu.js b/jstests/indexu.js new file mode 100644 index 0000000..c7fa8ed --- /dev/null +++ b/jstests/indexu.js @@ -0,0 +1,137 @@ +// Test index key generation with duplicate values addressed by array index and +// object field. SERVER-2902 + +t = db.jstests_indexu; +t.drop(); + +var dupDoc = {a:[{'0':1}]}; // There are two 'a.0' fields in this doc. +var dupDoc2 = {a:[{'1':1},'c']}; +var noDupDoc = {a:[{'1':1}]}; + +// Test that we can't index dupDoc. +t.save( dupDoc ); +assert( !db.getLastError() ); +t.ensureIndex( {'a.0':1} ); +assert( db.getLastError() ); + +t.remove(); +t.ensureIndex( {'a.0':1} ); +assert( !db.getLastError() ); +t.save( dupDoc ); +assert( db.getLastError() ); + +// Test that we can't index dupDoc2. +t.drop(); +t.save( dupDoc2 ); +assert( !db.getLastError() ); +t.ensureIndex( {'a.1':1} ); +assert( db.getLastError() ); + +t.remove(); +t.ensureIndex( {'a.1':1} ); +assert( !db.getLastError() ); +t.save( dupDoc2 ); +assert( db.getLastError() ); + +// Test that we can index dupDoc with a different index. +t.drop(); +t.ensureIndex( {'a.b':1} ); +t.save( dupDoc ); +assert( !db.getLastError() ); + +// Test number field starting with hyphen. +t.drop(); +t.ensureIndex( {'a.-1':1} ); +t.save( {a:[{'-1':1}]} ); +assert( !db.getLastError() ); + +// Test number field starting with zero. +t.drop(); +t.ensureIndex( {'a.00':1} ); +t.save( {a:[{'00':1}]} ); +assert( !db.getLastError() ); + +// Test multiple array indexes +t.drop(); +t.ensureIndex( {'a.0':1,'a.1':1} ); +t.save( {a:[{'1':1}]} ); +assert( !db.getLastError() ); +t.save( {a:[{'1':1},4]} ); +assert( db.getLastError() ); + +// Test that we can index noDupDoc. +t.drop(); +t.save( noDupDoc ); +t.ensureIndex( {'a.0':1} ); +assert( !db.getLastError() ); +t.ensureIndex( {'a.1':1} ); +assert( !db.getLastError() ); + +t.drop(); +t.ensureIndex( {'a.0':1} ); +t.ensureIndex( {'a.1':1} ); +t.save( noDupDoc ); +assert( !db.getLastError() ); + +// Test that we can query noDupDoc. 
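+// (Recall noDupDoc = {a:[{'1':1}]}: the array has no element at position 1,
+// so 'a.1' unambiguously addresses the object field, and both the indexed
+// and $natural plans below should each find the single document.)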
+assert.eq( 1, t.find( {'a.1':1} ).hint( {'a.1':1} ).itcount() ); +assert.eq( 1, t.find( {'a.1':1} ).hint( {$natural:1} ).itcount() ); +assert.eq( 1, t.find( {'a.0':{'1':1}} ).hint( {'a.0':1} ).itcount() ); +assert.eq( 1, t.find( {'a.0':{'1':1}} ).hint( {$natural:1} ).itcount() ); + +// Check multiple nested array fields. +t.drop(); +t.save( {a:[[1]]} ); +t.ensureIndex( {'a.0.0':1} ); +assert( !db.getLastError() ); +assert.eq( 1, t.find( {'a.0.0':1} ).hint( {$natural:1} ).itcount() ); +assert.eq( 1, t.find( {'a.0.0':1} ).hint( {'a.0.0':1} ).itcount() ); + +// Check where there is a duplicate for a partially addressed field but not for a fully addressed field. +t.drop(); +t.save( {a:[[1],{'0':1}]} ); +t.ensureIndex( {'a.0.0':1} ); +assert( db.getLastError() ); + +// Check where there is a duplicate for a fully addressed field. +t.drop(); +t.save( {a:[[1],{'0':[1]}]} ); +assert( !db.getLastError() ); +t.ensureIndex( {'a.0.0':1} ); +assert( db.getLastError() ); + +// Two ways of addressing parse to an array. +t.drop(); +t.save( {a:[{'0':1}]} ); +t.ensureIndex( {'a.0.0':1} ); +assert( db.getLastError() ); + +// Test several key depths - with same arrays being found. +t.drop(); +t.save( {a:[{'0':[{'0':1}]}]} ); +t.ensureIndex( {'a.0.0.0.0.0.0':1} ); +assert( db.getLastError() ); +t.ensureIndex( {'a.0.0.0.0.0':1} ); +assert( db.getLastError() ); +t.ensureIndex( {'a.0.0.0.0':1} ); +assert( db.getLastError() ); +t.ensureIndex( {'a.0.0.0':1} ); +assert( db.getLastError() ); +t.ensureIndex( {'a.0.0':1} ); +assert( db.getLastError() ); +t.ensureIndex( {'a.0':1} ); +assert( db.getLastError() ); +t.ensureIndex( {'a':1} ); +assert( !db.getLastError() ); + +// Two prefixes extract docs, but one terminates extraction before array. +t.drop(); +t.save( {a:[{'0':{'c':[]}}]} ); +t.ensureIndex( {'a.0.c':1} ); +assert( db.getLastError() ); + +t.drop(); +t.save( {a:[[{'b':1}]]} ); +assert.eq( 1, t.find( {'a.0.b':1} ).itcount() ); +t.ensureIndex( {'a.0.b':1} ); +assert.eq( 1, t.find( {'a.0.b':1} ).itcount() ); diff --git a/jstests/indexv.js b/jstests/indexv.js new file mode 100644 index 0000000..a69ff2a --- /dev/null +++ b/jstests/indexv.js @@ -0,0 +1,18 @@ +// Check null key generation. + +t = db.jstests_indexv; +t.drop(); + +t.ensureIndex( {'a.b':1} ); + +t.save( {a:[{},{b:1}]} ); +var e = t.find( {'a.b':null} ).explain(); +assert.eq( 0, e.n ); +assert.eq( 1, e.nscanned ); + +t.drop(); +t.ensureIndex( {'a.b.c':1} ); +t.save( {a:[{b:[]},{b:{c:1}}]} ); +var e = t.find( {'a.b.c':null} ).explain(); +assert.eq( 0, e.n ); +assert.eq( 1, e.nscanned ); diff --git a/jstests/indexw.js b/jstests/indexw.js new file mode 100644 index 0000000..3264434 --- /dev/null +++ b/jstests/indexw.js @@ -0,0 +1,14 @@ +// Check that v0 keys are generated for v0 indexes SERVER-3375 + +t = db.jstests_indexw; +t.drop(); + +t.save( {a:[]} ); +assert.eq( 1, t.count( {a:[]} ) ); +t.ensureIndex( {a:1} ); +assert.eq( 1, t.count( {a:[]} ) ); +t.dropIndexes(); + +// The count result is incorrect - just checking here that v0 key generation is used. 
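+// (Presumably the v0 key format encodes the empty array differently than
+// v1, so the exact {a:[]} match no longer works against a {v:0} index; the
+// assertion below only pins down that v0 key generation is in effect.)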
+t.ensureIndex( {a:1}, {v:0} ); +assert.eq( 0, t.count( {a:[]} ) ); diff --git a/jstests/insert1.js b/jstests/insert1.js index 76edca1..7e6b73b 100644 --- a/jstests/insert1.js +++ b/jstests/insert1.js @@ -39,3 +39,6 @@ assert.eq(id1, id2, "ids match 4"); assert.eq(o, {a:4, _id:id1}, "input unchanged 4"); assert.eq(t.findOne({_id:id1}).a, 4, "find by id 4"); assert.eq(t.findOne({a:4})._id, id1 , "find by val 4"); + +var stats = db.runCommand({ collstats: "insert1" }); +assert(stats.paddingFactor == 1.0); diff --git a/jstests/libs/geo_near_random.js b/jstests/libs/geo_near_random.js index 8624ef2..adf4f86 100644 --- a/jstests/libs/geo_near_random.js +++ b/jstests/libs/geo_near_random.js @@ -11,25 +11,46 @@ GeoNearRandomTest = function(name) { } -GeoNearRandomTest.prototype.mkPt = function mkPt(scale){ - scale = scale || 1; // scale is good for staying away from edges - return [((Random.rand() * 359.8) - 179.9) * scale, ((Random.rand() * 180) - 90) * scale]; +GeoNearRandomTest.prototype.mkPt = function mkPt(scale, indexBounds){ + if(!indexBounds){ + scale = scale || 1; // scale is good for staying away from edges + return [((Random.rand() * 359.8) - 179.9) * scale, ((Random.rand() * 180) - 90) * scale]; + } + else{ + var range = indexBounds.max - indexBounds.min; + var eps = Math.pow(2, -40); + // Go very close to the borders but not quite there. + return [( Random.rand() * (range - eps) + eps) + indexBounds.min, ( Random.rand() * (range - eps) + eps ) + indexBounds.min]; + } + } -GeoNearRandomTest.prototype.insertPts = function(nPts) { +GeoNearRandomTest.prototype.insertPts = function(nPts, indexBounds) { assert.eq(this.nPts, 0, "insertPoints already called"); this.nPts = nPts; for (var i=0; i NumberInt( 4 ) , "lt" ); +assert( NumberInt( 1 ) , "to bool a" ); + +// objects are always considered truthy +//assert( !
NumberInt( 0 ) , "to bool b" ); + +// create doc with int value in db +t = db.getCollection( "numberint" ); +t.drop(); + +o = { a : NumberInt(42) }; +t.save( o ); + +assert.eq( 42 , t.findOne().a , "save doc 1" ); +assert.eq( 1 , t.find({a: {$type: 16}}).count() , "save doc 2" ); +assert.eq( 0 , t.find({a: {$type: 1}}).count() , "save doc 3" ); + +// roundtripping +mod = t.findOne({a: 42}); +mod.a += 10; +mod.b = "foo"; +delete mod._id; +t.save(mod); +assert.eq( 2 , t.find({a: {$type: 16}}).count() , "roundtrip 1" ); +assert.eq( 0 , t.find({a: {$type: 1}}).count() , "roundtrip 2" ); +assert.eq( 1 , t.find({a: 52}).count() , "roundtrip 3" ); + +// save regular number +t.save({a: 42}); +assert.eq( 2 , t.find({a: {$type: 16}}).count() , "normal 1" ); +assert.eq( 1 , t.find({a: {$type: 1}}).count() , "normal 2" ); +assert.eq( 2 , t.find({a: 42}).count() , "normal 3" ); + + diff --git a/jstests/numberlong2.js b/jstests/numberlong2.js new file mode 100644 index 0000000..2540d2d --- /dev/null +++ b/jstests/numberlong2.js @@ -0,0 +1,32 @@ +// Test precision of NumberLong values with v1 index code SERVER-3717 + +if ( 1 ) { // SERVER-3717 + +t = db.jstests_numberlong2; +t.drop(); + +t.ensureIndex( {x:1} ); + +function chk(longNum) { + t.remove(); + t.save({ x: longNum }); + assert.eq(longNum, t.find().hint({ x: 1 }).next().x); + assert.eq(longNum, t.find({}, { _id: 0, x: 1 }).hint({ x: 1 }).next().x); +} + +chk( NumberLong("1123539983311657217") ); +chk(NumberLong("-1123539983311657217")); + chk(NumberLong("4503599627370495")); + chk(NumberLong("4503599627370496")); + chk(NumberLong("4503599627370497")); + +t.remove(); + +s = "11235399833116571"; +for( i = 99; i >= 0; --i ) { + t.save( {x:NumberLong( s + i )} ); +} + +assert.eq( t.find().sort( {x:1} ).hint( {$natural:1} ).toArray(), t.find().sort( {x:1} ).hint( {x:1} ).toArray() ); + +} \ No newline at end of file diff --git a/jstests/numberlong3.js b/jstests/numberlong3.js new file mode 100644 index 0000000..10036c0 --- /dev/null +++ b/jstests/numberlong3.js @@ -0,0 +1,25 @@ +// Test sorting with long longs and doubles - SERVER-3719 + +t = db.jstests_numberlong3; +t.drop(); + +s = "11235399833116571"; +for( i = 10; i >= 0; --i ) { + n = NumberLong( s + i ); + t.save( {x:n} ); + if ( 0 ) { // SERVER-3719 + t.save( {x:n.floatApprox} ); + } +} + +ret = t.find().sort({x:1}).toArray().filter( function( x ) { return typeof( x.x.floatApprox ) != 'undefined' } ); + +//printjson( ret ); + +for( i = 1; i < ret.length; ++i ) { + first = ret[i-1].x.toString(); + second = ret[i].x.toString(); + if ( first.length == second.length ) { + assert.lte( ret[i-1].x.toString(), ret[i].x.toString() ); + } +} diff --git a/jstests/or1.js b/jstests/or1.js index 66162c4..66bbd2e 100644 --- a/jstests/or1.js +++ b/jstests/or1.js @@ -7,7 +7,7 @@ checkArrs = function( a, b, m ) { bStr = []; a.forEach( function( x ) { aStr.push( tojson( x ) ); } ); b.forEach( function( x ) { bStr.push( tojson( x ) ); } ); - for ( i in aStr ) { + for ( i = 0; i < aStr.length; ++i ) { assert( -1 != bStr.indexOf( aStr[ i ] ), m ); } } diff --git a/jstests/or2.js b/jstests/or2.js index d90cc85..297542e 100644 --- a/jstests/or2.js +++ b/jstests/or2.js @@ -7,7 +7,7 @@ checkArrs = function( a, b, m ) { bStr = []; a.forEach( function( x ) { aStr.push( tojson( x ) ); } ); b.forEach( function( x ) { bStr.push( tojson( x ) ); } ); - for ( i in aStr ) { + for ( i = 0; i < aStr.length; ++i ) { assert( -1 != bStr.indexOf( aStr[ i ] ), m ); } } @@ -29,7 +29,6 @@ doTest = function( index ) { assert.throws( 
function() { t.find( { x:0,$or:"a" } ).toArray(); } ); assert.throws( function() { t.find( { x:0,$or:[] } ).toArray(); } ); assert.throws( function() { t.find( { x:0,$or:[ "a" ] } ).toArray(); } ); - assert.throws( function() { t.find( { x:0,$or:[ {x:0,$or:[{x:0}]} ] } ).toArray(); } ); a1 = t.find( { x:0, $or: [ { a : 1 } ] } ).toArray(); checkArrs( [ { _id:0, x:0, a:1 } ], a1 ); diff --git a/jstests/or3.js b/jstests/or3.js index be85a8f..97028be 100644 --- a/jstests/or3.js +++ b/jstests/or3.js @@ -7,7 +7,7 @@ checkArrs = function( a, b, m ) { bStr = []; a.forEach( function( x ) { aStr.push( tojson( x ) ); } ); b.forEach( function( x ) { bStr.push( tojson( x ) ); } ); - for ( i in aStr ) { + for ( i = 0; i < aStr.length; ++i ) { assert( -1 != bStr.indexOf( aStr[ i ] ), m ); } } @@ -29,8 +29,6 @@ doTest = function( index ) { assert.throws( function() { t.find( { x:0,$nor:"a" } ).toArray(); } ); assert.throws( function() { t.find( { x:0,$nor:[] } ).toArray(); } ); assert.throws( function() { t.find( { x:0,$nor:[ "a" ] } ).toArray(); } ); - assert.throws( function() { t.find( { x:0,$nor:[ {x:0,$or:[{x:0}]} ] } ).toArray(); } ); - assert.throws( function() { t.find( { x:0,$nor:[ {x:0,$nor:[{x:0}]} ] } ).toArray(); } ); an1 = t.find( { $nor: [ { a : 1 } ] } ).toArray(); checkArrs( t.find( {a:{$ne:1}} ).toArray(), an1 ); diff --git a/jstests/or4.js b/jstests/or4.js index f793f36..3bfe191 100644 --- a/jstests/or4.js +++ b/jstests/or4.js @@ -17,7 +17,7 @@ checkArrs = function( a, b ) { bStr = []; a.forEach( function( x ) { aStr.push( tojson( x ) ); } ); b.forEach( function( x ) { bStr.push( tojson( x ) ); } ); - for ( i in aStr ) { + for ( i = 0; i < aStr.length; ++i ) { assert( -1 != bStr.indexOf( aStr[ i ] ), m ); } } diff --git a/jstests/ord.js b/jstests/ord.js index 4612f21..f78e504 100644 --- a/jstests/ord.js +++ b/jstests/ord.js @@ -28,6 +28,7 @@ for( i = 0; i < 90; ++i ) { // the index key {a:1} t.dropIndex( {a:1} ); +db.getLastError(); // Dropping an index kills all cursors on the indexed namespace, not just those // cursors using the dropped index. diff --git a/jstests/org.js b/jstests/org.js new file mode 100644 index 0000000..0833798 --- /dev/null +++ b/jstests/org.js @@ -0,0 +1,19 @@ +// SERVER-2282 $or de duping with sparse indexes + +t = db.jstests_org; +t.drop(); + +t.ensureIndex( {a:1}, {sparse:true} ); +t.ensureIndex( {b:1} ); + +t.remove(); +t.save( {a:1,b:2} ); +assert.eq( 1, t.count( {$or:[{a:1},{b:2}]} ) ); + +t.remove(); +t.save( {a:null,b:2} ); +assert.eq( 1, t.count( {$or:[{a:null},{b:2}]} ) ); + +t.remove(); +t.save( {b:2} ); +assert.eq( 1, t.count( {$or:[{a:null},{b:2}]} ) ); diff --git a/jstests/orh.js b/jstests/orh.js new file mode 100644 index 0000000..35f6a5b --- /dev/null +++ b/jstests/orh.js @@ -0,0 +1,17 @@ +// SERVER-2831 Demonstration of sparse index matching semantics in a multi index $or query. + +t = db.jstests_orh; +t.drop(); + +t.ensureIndex( {a:1}, {sparse:true} ); +t.ensureIndex( {b:1,a:1} ); + +t.remove(); +t.save( {b:2} ); +assert.eq( 0, t.count( {a:null} ) ); +assert.eq( 1, t.count( {b:2,a:null} ) ); + +assert.eq( 1, t.count( {$or:[{b:2,a:null},{a:null}]} ) ); + +// Is this desired? +assert.eq( 0, t.count( {$or:[{a:null},{b:2,a:null}]} ) ); diff --git a/jstests/ori.js b/jstests/ori.js new file mode 100644 index 0000000..9d923d6 --- /dev/null +++ b/jstests/ori.js @@ -0,0 +1,48 @@ +// Check elimination of proper range type when popping a $or clause SERVER-958. 
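+// Outline (illustrative): once a $or clause has been scanned, the ranges it
+// covered are subtracted from the remaining clauses. With a multikey index
+// the scanned bounds are coarser than the predicate, so whether the second
+// clause below survives depends on which range type was recorded for the
+// first clause - compare:
+//   t.find( {$or:[{a:{$gt:0,$lt:5}},{a:10,b:1}]} ).explain().clauses  // -> null
+//   t.find( {$or:[{a:{$lt:5,$gt:0}},{a:10,b:1}]} ).explain().clauses  // -> 2 clauses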
+ +t = db.jstests_ori; +t.drop(); + +t.ensureIndex( {a:1,b:1} ); +t.ensureIndex( {a:1,c:1} ); + +t.save( {a:1,b:[2,3],c:4} ); +t.save( {a:10,b:2,c:4} ); + +// Check that proper results are returned. + +assert.eq( 2, t.count( {$or:[{a:{$gt:0,$lt:5},b:2},{a:10,c:4}]} ) ); +// Two $or clauses expected to be scanned. +assert.eq( 2, t.find( {$or:[{a:{$gt:0,$lt:5},b:2},{a:10,c:4}]} ).explain().clauses.length ); +assert.eq( 2, t.count( {$or:[{a:10,b:2},{a:{$gt:0,$lt:5},c:4}]} ) ); + +t.drop(); + +// Now try a different index order. + +t.ensureIndex( {b:1,a:1} ); +t.ensureIndex( {a:1,c:1} ); + +t.save( {a:1,b:[2,3],c:4} ); +t.save( {a:10,b:2,c:4} ); + +assert.eq( 2, t.count( {$or:[{a:{$gt:0,$lt:5},b:2},{a:10,c:4}]} ) ); +assert.eq( 2, t.count( {$or:[{a:10,b:2},{a:{$gt:0,$lt:5},c:4}]} ) ); + +t.drop(); + +// Now eliminate a range. + +t.ensureIndex( {a:1} ); +t.ensureIndex( {b:1} ); + +t.save( {a:[1,2],b:1} ); +t.save( {a:10,b:1} ); + +assert.eq( 2, t.count( {$or:[{a:{$gt:0,$lt:5}},{a:10,b:1}]} ) ); +// Because a:1 is multikey, the value a:10 is scanned with the first clause. +assert.isnull( t.find( {$or:[{a:{$gt:0,$lt:5}},{a:10,b:1}]} ).explain().clauses ); + +assert.eq( 2, t.count( {$or:[{a:{$lt:5,$gt:0}},{a:10,b:1}]} ) ); +// Now a:10 is not scanned in the first clause so the second clause is not eliminated. +assert.eq( 2, t.find( {$or:[{a:{$lt:5,$gt:0}},{a:10,b:1}]} ).explain().clauses.length ); diff --git a/jstests/orj.js b/jstests/orj.js new file mode 100644 index 0000000..fa234f3 --- /dev/null +++ b/jstests/orj.js @@ -0,0 +1,121 @@ +// Test nested $or clauses SERVER-2585 SERVER-3192 + +t = db.jstests_orj; +t.drop(); + +t.save( {a:1,b:2} ); + +function check() { + +assert.throws( function() { t.find( { x:0,$or:"a" } ).toArray(); } ); +assert.throws( function() { t.find( { x:0,$or:[] } ).toArray(); } ); +assert.throws( function() { t.find( { x:0,$or:[ "a" ] } ).toArray(); } ); + +assert.throws( function() { t.find( { x:0,$or:[{$or:"a"}] } ).toArray(); } ); +assert.throws( function() { t.find( { x:0,$or:[{$or:[]}] } ).toArray(); } ); +assert.throws( function() { t.find( { x:0,$or:[{$or:[ "a" ]}] } ).toArray(); } ); + +assert.throws( function() { t.find( { x:0,$nor:"a" } ).toArray(); } ); +assert.throws( function() { t.find( { x:0,$nor:[] } ).toArray(); } ); +assert.throws( function() { t.find( { x:0,$nor:[ "a" ] } ).toArray(); } ); + +assert.throws( function() { t.find( { x:0,$nor:[{$nor:"a"}] } ).toArray(); } ); +assert.throws( function() { t.find( { x:0,$nor:[{$nor:[]}] } ).toArray(); } ); +assert.throws( function() { t.find( { x:0,$nor:[{$nor:[ "a" ]}] } ).toArray(); } ); + +assert.eq( 1, t.find( {a:1,b:2} ).itcount() ); + +assert.eq( 1, t.find( {a:1,$or:[{b:2}]} ).itcount() ); +assert.eq( 0, t.find( {a:1,$or:[{b:3}]} ).itcount() ); + +assert.eq( 1, t.find( {a:1,$or:[{$or:[{b:2}]}]} ).itcount() ); +assert.eq( 1, t.find( {a:1,$or:[{$or:[{b:2}]}]} ).itcount() ); +assert.eq( 0, t.find( {a:1,$or:[{$or:[{b:3}]}]} ).itcount() ); + +assert.eq( 1, t.find( {$or:[{$or:[{a:2},{b:2}]}]} ).itcount() ); +assert.eq( 1, t.find( {$or:[{a:2},{$or:[{b:2}]}]} ).itcount() ); +assert.eq( 1, t.find( {$or:[{a:1},{$or:[{b:3}]}]} ).itcount() ); + +assert.eq( 1, t.find( {$or:[{$or:[{a:1},{a:2}]},{$or:[{b:3},{b:4}]}]} ).itcount() ); +assert.eq( 1, t.find( {$or:[{$or:[{a:0},{a:2}]},{$or:[{b:2},{b:4}]}]} ).itcount() ); +assert.eq( 0, t.find( {$or:[{$or:[{a:0},{a:2}]},{$or:[{b:3},{b:4}]}]} ).itcount() ); + +assert.eq( 1, t.find( {a:1,$and:[{$or:[{$or:[{b:2}]}]}]} ).itcount() ); +assert.eq( 0, t.find( 
{a:1,$and:[{$or:[{$or:[{b:3}]}]}]} ).itcount() ); + +assert.eq( 1, t.find( {$and:[{$or:[{a:1},{a:2}]},{$or:[{b:1},{b:2}]}]} ).itcount() ); +assert.eq( 0, t.find( {$and:[{$or:[{a:3},{a:2}]},{$or:[{b:1},{b:2}]}]} ).itcount() ); +assert.eq( 0, t.find( {$and:[{$or:[{a:1},{a:2}]},{$or:[{b:3},{b:1}]}]} ).itcount() ); + +assert.eq( 0, t.find( {$and:[{$nor:[{a:1},{a:2}]},{$nor:[{b:1},{b:2}]}]} ).itcount() ); +assert.eq( 0, t.find( {$and:[{$nor:[{a:3},{a:2}]},{$nor:[{b:1},{b:2}]}]} ).itcount() ); +assert.eq( 1, t.find( {$and:[{$nor:[{a:3},{a:2}]},{$nor:[{b:3},{b:1}]}]} ).itcount() ); + +assert.eq( 1, t.find( {$and:[{$or:[{a:1},{a:2}]},{$nor:[{b:1},{b:3}]}]} ).itcount() ); +assert.eq( 0, t.find( {$and:[{$or:[{a:3},{a:2}]},{$nor:[{b:1},{b:3}]}]} ).itcount() ); +assert.eq( 0, t.find( {$and:[{$or:[{a:1},{a:2}]},{$nor:[{b:1},{b:2}]}]} ).itcount() ); + +} + +check(); + +t.ensureIndex( {a:1} ); +check(); +t.dropIndexes(); + +t.ensureIndex( {b:1} ); +check(); +t.dropIndexes(); + +t.ensureIndex( {a:1} ); +t.ensureIndex( {b:1} ); +check(); +t.dropIndexes(); + +t.ensureIndex( {a:1,b:1} ); +check(); +t.dropIndexes(); + +t.ensureIndex( {a:1} ); +t.ensureIndex( {b:1} ); +t.ensureIndex( {a:1,b:1} ); +check(); + +function checkHinted( hint ) { + assert.eq( 1, t.find( {a:1,b:2} ).hint( hint ).itcount() ); + + assert.eq( 1, t.find( {a:1,$or:[{b:2}]} ).hint( hint ).itcount() ); + assert.eq( 0, t.find( {a:1,$or:[{b:3}]} ).hint( hint ).itcount() ); + + assert.eq( 1, t.find( {a:1,$or:[{$or:[{b:2}]}]} ).hint( hint ).itcount() ); + assert.eq( 1, t.find( {a:1,$or:[{$or:[{b:2}]}]} ).hint( hint ).itcount() ); + assert.eq( 0, t.find( {a:1,$or:[{$or:[{b:3}]}]} ).hint( hint ).itcount() ); + + assert.eq( 1, t.find( {$or:[{$or:[{a:2},{b:2}]}]} ).hint( hint ).itcount() ); + assert.eq( 1, t.find( {$or:[{a:2},{$or:[{b:2}]}]} ).hint( hint ).itcount() ); + assert.eq( 1, t.find( {$or:[{a:1},{$or:[{b:3}]}]} ).hint( hint ).itcount() ); + + assert.eq( 1, t.find( {$or:[{$or:[{a:1},{a:2}]},{$or:[{b:3},{b:4}]}]} ).hint( hint ).itcount() ); + assert.eq( 1, t.find( {$or:[{$or:[{a:0},{a:2}]},{$or:[{b:2},{b:4}]}]} ).hint( hint ).itcount() ); + assert.eq( 0, t.find( {$or:[{$or:[{a:0},{a:2}]},{$or:[{b:3},{b:4}]}]} ).hint( hint ).itcount() ); + + assert.eq( 1, t.find( {a:1,$and:[{$or:[{$or:[{b:2}]}]}]} ).hint( hint ).itcount() ); + assert.eq( 0, t.find( {a:1,$and:[{$or:[{$or:[{b:3}]}]}]} ).hint( hint ).itcount() ); + + assert.eq( 1, t.find( {$and:[{$or:[{a:1},{a:2}]},{$or:[{b:1},{b:2}]}]} ).hint( hint ).itcount() ); + assert.eq( 0, t.find( {$and:[{$or:[{a:3},{a:2}]},{$or:[{b:1},{b:2}]}]} ).hint( hint ).itcount() ); + assert.eq( 0, t.find( {$and:[{$or:[{a:1},{a:2}]},{$or:[{b:3},{b:1}]}]} ).hint( hint ).itcount() ); + + assert.eq( 0, t.find( {$and:[{$nor:[{a:1},{a:2}]},{$nor:[{b:1},{b:2}]}]} ).hint( hint ).itcount() ); + assert.eq( 0, t.find( {$and:[{$nor:[{a:3},{a:2}]},{$nor:[{b:1},{b:2}]}]} ).hint( hint ).itcount() ); + assert.eq( 1, t.find( {$and:[{$nor:[{a:3},{a:2}]},{$nor:[{b:3},{b:1}]}]} ).hint( hint ).itcount() ); + + assert.eq( 1, t.find( {$and:[{$or:[{a:1},{a:2}]},{$nor:[{b:1},{b:3}]}]} ).hint( hint ).itcount() ); + assert.eq( 0, t.find( {$and:[{$or:[{a:3},{a:2}]},{$nor:[{b:1},{b:3}]}]} ).hint( hint ).itcount() ); + assert.eq( 0, t.find( {$and:[{$or:[{a:1},{a:2}]},{$nor:[{b:1},{b:2}]}]} ).hint( hint ).itcount() ); +} + +checkHinted( {$natural:1} ); +checkHinted( {a:1} ); +checkHinted( {b:1} ); +checkHinted( {a:1,b:1} ); \ No newline at end of file diff --git a/jstests/ork.js b/jstests/ork.js new file mode 100644 index 0000000..d6d4016 --- 
/dev/null +++ b/jstests/ork.js @@ -0,0 +1,11 @@ +// SERVER-2585 Test $or clauses within indexed top level $or clauses. + +t = db.jstests_ork; +t.drop(); + +t.ensureIndex( {a:1} ); +t.save( {a:[1,2],b:5} ); +t.save( {a:[2,4],b:5} ); + +assert.eq( 2, t.find( {$or:[{a:1,$and:[{$or:[{a:2},{a:3}]},{$or:[{b:5}]}]},{a:2,$or:[{a:3},{a:4}]}]} ).itcount() ); +assert.eq( 1, t.find( {$or:[{a:1,$and:[{$or:[{a:2},{a:3}]},{$or:[{b:6}]}]},{a:2,$or:[{a:3},{a:4}]}]} ).itcount() ); diff --git a/jstests/orl.js b/jstests/orl.js new file mode 100644 index 0000000..2726975 --- /dev/null +++ b/jstests/orl.js @@ -0,0 +1,13 @@ +// SERVER-3445 Test using coarse multikey bounds for or range elimination. + +t = db.jstests_orl; +t.drop(); + +t.ensureIndex( {'a.b':1,'a.c':1} ); +// make the index multikey +t.save( {a:{b:[1,2]}} ); + +// SERVER-3445 +if ( 0 ) { +assert( !t.find( {$or:[{'a.b':2,'a.c':3},{'a.b':2,'a.c':4}]} ).explain().clauses ); +} \ No newline at end of file diff --git a/jstests/orm.js b/jstests/orm.js new file mode 100644 index 0000000..dae75e4 --- /dev/null +++ b/jstests/orm.js @@ -0,0 +1,29 @@ +// Test dropping during a $or yield SERVER-3555 + +t = db.jstests_orm; +t.drop(); + +clauses = []; +for( i = 0; i < 10; ++i ) { + clauses.push( {a:{$lte:(i+1)*5000/10},i:49999} ); + clauses.push( {b:{$lte:(i+1)*5000/10},i:49999} ); +} + +p = startParallelShell( 'for( i = 0; i < 15; ++i ) { sleep( 1000 ); db.jstests_orm.drop() }' ); +for( j = 0; j < 5; ++j ) { + for( i = 0; i < 5000; ++i ) { + t.save( {a:i,i:i} ); + t.save( {b:i,i:i} ); + } + t.ensureIndex( {a:1} ); + t.ensureIndex( {b:1} ); + try { + t.find( {$or:clauses} ).itcount(); + t.find( {$or:clauses} ).count(); + t.update( {$or:clauses}, {} ); + t.remove( {$or:clauses} ); + } catch ( e ) { + } + db.getLastError(); +} +p(); diff --git a/jstests/orn.js b/jstests/orn.js new file mode 100644 index 0000000..c900bb8 --- /dev/null +++ b/jstests/orn.js @@ -0,0 +1,22 @@ +// Test dropping during an $or distinct yield SERVER-3555 + +t = db.jstests_orn; +t.drop(); + +clauses = []; +for( i = 0; i < 10; ++i ) { + clauses.push( {a:{$lte:(i+1)*5000/10},i:49999} ); + clauses.push( {b:{$lte:(i+1)*5000/10},i:49999} ); +} + +p = startParallelShell( 'for( i = 0; i < 15; ++i ) { sleep( 1000 ); db.jstests_orn.drop() }' ); +for( j = 0; j < 5; ++j ) { + for( i = 0; i < 5000; ++i ) { + t.save( {a:i,i:i} ); + t.save( {b:i,i:i} ); + } + t.ensureIndex( {a:1} ); + t.ensureIndex( {b:1} ); + t.distinct('a',{$or:clauses}); +} +p(); diff --git a/jstests/profile1.js b/jstests/profile1.js index 0e8009a..9654357 100644 --- a/jstests/profile1.js +++ b/jstests/profile1.js @@ -1,49 +1,125 @@ +print("profile1.js BEGIN"); try { -/* With pre-created system.profile (capped) */ -db.runCommand({profile: 0}); -db.getCollection("system.profile").drop(); -assert(!db.getLastError(), "Z"); -assert.eq(0, db.runCommand({profile: -1}).was, "A"); + function getProfileAString() { + var s = "\n"; + db.system.profile.find().forEach( function(z){ + s += tojson( z ) + " ,\n" ; + } ); + return s; + } -db.createCollection("system.profile", {capped: true, size: 1000}); -db.runCommand({profile: 2}); -assert.eq(2, db.runCommand({profile: -1}).was, "B"); -assert.eq(1, db.system.profile.stats().capped, "C"); -var capped_size = db.system.profile.storageSize(); -assert.gt(capped_size, 999, "D"); -assert.lt(capped_size, 2000, "E"); + /* With pre-created system.profile (capped) */ + db.runCommand({profile: 0}); + db.getCollection("system.profile").drop(); + assert(!db.getLastError(), "Z"); + assert.eq(0, 
db.runCommand({profile: -1}).was, "A"); + + db.createCollection("system.profile", {capped: true, size: 10000}); + db.runCommand({profile: 2}); + assert.eq(2, db.runCommand({profile: -1}).was, "B"); + assert.eq(1, db.system.profile.stats().capped, "C"); + var capped_size = db.system.profile.storageSize(); + assert.gt(capped_size, 9999, "D"); + assert.lt(capped_size, 20000, "E"); + + db.foo.findOne() + + assert.eq( 4 , db.system.profile.find().count() , "E2" ); + + /* Make sure we can't drop if profiling is still on */ + assert.throws( function(z){ db.getCollection("system.profile").drop(); } ) -db.foo.findOne() + /* With pre-created system.profile (un-capped) */ + db.runCommand({profile: 0}); + db.getCollection("system.profile").drop(); + assert.eq(0, db.runCommand({profile: -1}).was, "F"); + + db.createCollection("system.profile"); + db.runCommand({profile: 2}); + assert.eq(2, db.runCommand({profile: -1}).was, "G"); + assert.eq(null, db.system.profile.stats().capped, "G1"); + + /* With no system.profile collection */ + db.runCommand({profile: 0}); + db.getCollection("system.profile").drop(); + assert.eq(0, db.runCommand({profile: -1}).was, "H"); + + db.runCommand({profile: 2}); + assert.eq(2, db.runCommand({profile: -1}).was, "I"); + assert.eq(1, db.system.profile.stats().capped, "J"); + var auto_size = db.system.profile.storageSize(); + assert.gt(auto_size, capped_size, "K"); + -assert.eq( 4 , db.system.profile.find().count() , "E2" ); + db.eval("sleep(1)") // pre-load system.js -/* Make sure we can't drop if profiling is still on */ -assert.throws( function(z){ db.getCollection("system.profile").drop(); } ) + db.setProfilingLevel(2); + before = db.system.profile.count(); + db.eval( "sleep(25)" ) + db.eval( "sleep(120)" ) + after = db.system.profile.count() + assert.eq( before + 3 , after , "X1" ) -/* With pre-created system.profile (un-capped) */ -db.runCommand({profile: 0}); -db.getCollection("system.profile").drop(); -assert.eq(0, db.runCommand({profile: -1}).was, "F"); + /* sleep() could be inaccurate on certain platforms. let's check */ + print("\nsleep 2 time actual:"); + for (var i = 0; i < 4; i++) { + print(db.eval("var x = new Date(); sleep(2); return new Date() - x;")); + } + print(); + print("\nsleep 20 times actual:"); + for (var i = 0; i < 4; i++) { + print(db.eval("var x = new Date(); sleep(20); return new Date() - x;")); + } + print(); + print("\nsleep 120 times actual:"); + for (var i = 0; i < 4; i++) { + print(db.eval("var x = new Date(); sleep(120); return new Date() - x;")); + } + print(); -db.createCollection("system.profile"); -db.runCommand({profile: 2}); -assert.eq(2, db.runCommand({profile: -1}).was, "G"); -assert.eq(null, db.system.profile.stats().capped, "G1"); + function evalSleepMoreThan(millis,max){ + var start = new Date(); + db.eval("sleep("+millis+")"); + var end = new Date(); + var actual = end.getTime() - start.getTime(); + if ( actual > ( millis + 5 ) ) { + print( "warning wanted to sleep for: " + millis + " but took: " + actual ); + } + return actual >= max ? 
1 : 0; + } -/* With no system.profile collection */ -db.runCommand({profile: 0}); -db.getCollection("system.profile").drop(); -assert.eq(0, db.runCommand({profile: -1}).was, "H"); + db.setProfilingLevel(1,100); + before = db.system.profile.count(); + var delta = 0; + delta += evalSleepMoreThan( 15 , 100 ); + delta += evalSleepMoreThan( 120 , 100 ); + after = db.system.profile.count() + assert.eq( before + delta , after , "X2 : " + getProfileAString() ) -db.runCommand({profile: 2}); -assert.eq(2, db.runCommand({profile: -1}).was, "I"); -assert.eq(1, db.system.profile.stats().capped, "J"); -var auto_size = db.system.profile.storageSize(); -assert.gt(auto_size, capped_size, "K"); + db.setProfilingLevel(1,20); + before = db.system.profile.count(); + delta = 0; + delta += evalSleepMoreThan( 5 , 20 ); + delta += evalSleepMoreThan( 120 , 20 ); + after = db.system.profile.count() + assert.eq( before + delta , after , "X3 : " + getProfileAString() ) + + db.profile.drop(); + db.setProfilingLevel(2) + var q = { _id : 5 }; + var u = { $inc : { x : 1 } }; + db.profile1.update( q , u ); + var r = db.system.profile.find().sort( { $natural : -1 } )[0] + assert.eq( q , r.query , "Y1" ); + assert.eq( u , r.updateobj , "Y2" ); + assert.eq( "update" , r.op , "Y3" ); + assert.eq("test.profile1", r.ns, "Y4"); + print("profile1.js SUCCESS OK"); + } finally { // disable profiling for subsequent tests assert.commandWorked( db.runCommand( {profile:0} ) ); -} \ No newline at end of file +} diff --git a/jstests/profile2.js b/jstests/profile2.js new file mode 100644 index 0000000..929b463 --- /dev/null +++ b/jstests/profile2.js @@ -0,0 +1,19 @@ +print("profile2.js BEGIN"); + +try { + + assert.commandWorked( db.runCommand( {profile:2} ) ); + + huge = 'huge'; + while (huge.length < 2*1024*1024){ + huge += huge; + } + + db.profile2.count({huge:huge}) // would make a huge entry in db.system.profile + + print("profile2.js SUCCESS OK"); + +} finally { + // disable profiling for subsequent tests + assert.commandWorked( db.runCommand( {profile:0} ) ); +} diff --git a/jstests/profile3.js b/jstests/profile3.js new file mode 100644 index 0000000..a6574b7 --- /dev/null +++ b/jstests/profile3.js @@ -0,0 +1,26 @@ + +t = db.profile3; +t.drop(); + +try { + db.setProfilingLevel(0); + + db.system.profile.drop(); + assert.eq( 0 , db.system.profile.count() ) + + db.setProfilingLevel(2); + + t.insert( { x : 1 } ); + t.findOne( { x : 1 } ); + t.find( { x : 1 } ).count(); + + db.system.profile.find().forEach( printjson ) + + db.setProfilingLevel(0); + db.system.profile.drop(); + +} +finally { + db.setProfilingLevel(0); +} + diff --git a/jstests/push.js b/jstests/push.js index 2cdd91c..9bcaa2f 100644 --- a/jstests/push.js +++ b/jstests/push.js @@ -17,6 +17,38 @@ assert.eq( "2" , t.findOne().a.toString() , "D" ); t.update( { _id : 2 } , { $push : { a : 3 } } ); t.update( { _id : 2 } , { $push : { a : 4 } } ); t.update( { _id : 2 } , { $push : { a : 5 } } ); -assert.eq( "2,3,4,5" , t.findOne().a.toString() , "D" ); +assert.eq( "2,3,4,5" , t.findOne().a.toString() , "E1" ); + +t.update( { _id : 2 } , { $pop : { a : -1 } } ); +assert.eq( "3,4,5" , t.findOne().a.toString() , "E2" ); + +t.update( { _id : 2 } , { $pop : { a : -1 } } ); +assert.eq( "4,5" , t.findOne().a.toString() , "E3" ); + t.update( { _id : 2 } , { $pop : { a : -1 } } ); -assert.eq( "3,4,5" , t.findOne().a.toString() , "D" ); +assert.isnull( db.getLastError() , "E4a" ) +assert.eq( "5" , t.findOne().a.toString() , "E4" ); + + +t.update( { _id : 2 } , { $pop : { a : -1 } } ); 
+assert.isnull( db.getLastError() , "E5a") +assert.eq( "" , t.findOne().a.toString() , "E5" ); + +t.update( { _id : 2 } , { $pop : { a : -1 } } ); +assert.isnull( db.getLastError() , "E6a" ) +assert.eq( "" , t.findOne().a.toString() , "E6" ); + +t.update( { _id : 2 } , { $pop : { a : -1 } } ); +assert.isnull( db.getLastError() , "E7a" ) +assert.eq( "" , t.findOne().a.toString() , "E7" ); + +t.update( { _id : 2 } , { $pop : { a : 1 } } ); +assert.isnull( db.getLastError() , "E8a" ) +assert.eq( "" , t.findOne().a.toString() , "E8" ); + +t.update( { _id : 2 } , { $pop : { b : -1 } } ); +assert.isnull( db.getLastError() , "E4a" ) + +t.update( { _id : 2 } , { $pop : { b : 1 } } ); +assert.isnull( db.getLastError() , "E4a" ) + diff --git a/jstests/query1.js b/jstests/query1.js index 9b40054..c3e276f 100644 --- a/jstests/query1.js +++ b/jstests/query1.js @@ -18,3 +18,6 @@ t.find().forEach( assert.eq( num , 3 , "num" ) assert.eq( total , 8 , "total" ) + +assert.eq( 3 , t.find()._addSpecial( "$comment" , "this is a test" ).itcount() , "B1" ) +assert.eq( 3 , t.find()._addSpecial( "$comment" , "this is a test" ).count() , "B2" ) diff --git a/jstests/regex2.js b/jstests/regex2.js index b6a21f5..87d5cb4 100644 --- a/jstests/regex2.js +++ b/jstests/regex2.js @@ -60,3 +60,11 @@ assert.eq( 1 , t.find( { a : {$regex: a} } ).count() , "obj C D" ); assert.eq( 1 , t.find( { a : {$regex: b} } ).count() , "obj C E" ); assert.eq( 2 , t.find( { a : {$regex: a , $options: "i" } } ).count() , "obj C F is spidermonkey built with UTF-8 support?" ); +// Test s (DOT_ALL) option. Not supported with /regex/opts syntax +t.drop(); +t.save({a:'1 2'}) +t.save({a:'1\n2'}) +assert.eq( 1 , t.find( { a : {$regex: '1.*2'} } ).count() ); +assert.eq( 2 , t.find( { a : {$regex: '1.*2', $options: 's'} } ).count() ); + + diff --git a/jstests/regex6.js b/jstests/regex6.js index 8243313..5414324 100644 --- a/jstests/regex6.js +++ b/jstests/regex6.js @@ -6,6 +6,7 @@ t.save( { name : "eliot" } ); t.save( { name : "emily" } ); t.save( { name : "bob" } ); t.save( { name : "aaron" } ); +t.save( { name : "[with]some?symbols" } ); t.ensureIndex( { name : 1 } ); @@ -14,9 +15,15 @@ assert.eq( 1 , t.find( { name : /^\// } ).explain().nscanned , "index explain 1" assert.eq( 0 , t.find( { name : /^é/ } ).explain().nscanned , "index explain 2" ); assert.eq( 0 , t.find( { name : /^\é/ } ).explain().nscanned , "index explain 3" ); assert.eq( 1 , t.find( { name : /^\./ } ).explain().nscanned , "index explain 4" ); -assert.eq( 4 , t.find( { name : /^./ } ).explain().nscanned , "index explain 5" ); +assert.eq( 5 , t.find( { name : /^./ } ).explain().nscanned , "index explain 5" ); -assert.eq( 4 , t.find( { name : /^\Qblah\E/ } ).explain().nscanned , "index explain 6" ); +// SERVER-2862 +assert.eq( 0 , t.find( { name : /^\Qblah\E/ } ).count() , "index explain 6" ); +assert.eq( 1 , t.find( { name : /^\Qblah\E/ } ).explain().nscanned , "index explain 6" ); +assert.eq( 1 , t.find( { name : /^blah/ } ).explain().nscanned , "index explain 6" ); +assert.eq( 1 , t.find( { name : /^\Q[\Ewi\Qth]some?s\Eym/ } ).count() , "index explain 6" ); +assert.eq( 2 , t.find( { name : /^\Q[\Ewi\Qth]some?s\Eym/ } ).explain().nscanned , "index explain 6" ); +assert.eq( 2 , t.find( { name : /^bob/ } ).explain().nscanned , "index explain 6" ); // proof nscanned == count+1 assert.eq( 1, t.find( { name : { $regex : "^e", $gte: "emily" } } ).explain().nscanned , "ie7" ); assert.eq( 1, t.find( { name : { $gt : "a", $regex: "^emily" } } ).explain().nscanned , "ie7" ); diff --git 
a/jstests/regexa.js b/jstests/regexa.js new file mode 100644 index 0000000..b0d4719 --- /dev/null +++ b/jstests/regexa.js @@ -0,0 +1,19 @@ +// Test simple regex optimization with a regex | (bar) present - SERVER-3298 + +t = db.jstests_regexa; +t.drop(); + +function check() { + assert.eq( 1, t.count( {a:/^(z|.)/} ) ); + assert.eq( 1, t.count( {a:/^z|./} ) ); + assert.eq( 0, t.count( {a:/^z(z|.)/} ) ); + assert.eq( 1, t.count( {a:/^zz|./} ) ); +} + +t.save( {a:'a'} ); + +check(); +t.ensureIndex( {a:1} ); +if ( 1 ) { // SERVER-3298 +check(); +} diff --git a/jstests/remove10.js b/jstests/remove10.js new file mode 100644 index 0000000..cf1dac4 --- /dev/null +++ b/jstests/remove10.js @@ -0,0 +1,28 @@ +// SERVER-2009 Update documents with adjacent indexed keys. +// This test doesn't fail, it just prints an invalid warning message. + +if ( 0 ) { // SERVER-2009 +t = db.jstests_remove10; +t.drop(); +t.ensureIndex( {i:1} ); + +function arr( i ) { + ret = []; + for( j = i; j < i + 11; ++j ) { + ret.push( j ); + } + return ret; +} + +for( i = 0; i < 1100; i += 11 ) { + t.save( {i:arr( i )} ); +} + +s = startParallelShell( 't = db.jstests_remove10; for( j = 0; j < 1000; ++j ) { o = t.findOne( {i:Random.randInt(1100)} ); t.remove( {_id:o._id} ); t.insert( o ); }' ); + +for( i = 0; i < 200; ++i ) { + t.find( {i:{$gte:0}} ).hint( {i:1} ).itcount(); +} + +s(); +} \ No newline at end of file diff --git a/jstests/remove2.js b/jstests/remove2.js index ff122a0..eb4ef07 100644 --- a/jstests/remove2.js +++ b/jstests/remove2.js @@ -21,6 +21,11 @@ function g() { t.save( { x:[7,8,9], z:"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" } ); t.remove( {x : {$gte:3}, $atomic:x++ } ); + + assert( !db.getLastError() ); + // $atomic within $and is not allowed. + t.remove( {x : {$gte:3}, $and:[{$atomic:true}] } ); + assert( db.getLastError() ); assert( t.findOne({x:3}) == null ); assert( t.findOne({x:8}) == null ); diff --git a/jstests/remove9.js b/jstests/remove9.js new file mode 100644 index 0000000..655594a --- /dev/null +++ b/jstests/remove9.js @@ -0,0 +1,16 @@ +// SERVER-2009 Count odd numbered entries while updating and deleting even numbered entries. 
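+// Shape of the test (a summary of the code below, not new assertions): a
+// parallel shell repeatedly lifts an even-numbered doc out of the index
+// range and reinserts it, while this shell walks the {i:1} index; the count
+// of the 500 untouched odd-numbered docs should never waver:
+//   t.find( {i:{$gte:0,$mod:[2,1]}} ).hint( {i:1} ).itcount()  // -> 500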
+ +t = db.jstests_remove9; +t.drop(); +t.ensureIndex( {i:1} ); +for( i = 0; i < 1000; ++i ) { + t.save( {i:i} ); +} + +s = startParallelShell( 't = db.jstests_remove9; for( j = 0; j < 5000; ++j ) { i = Random.randInt( 499 ) * 2; t.update( {i:i}, {$set:{i:2000}} ); t.remove( {i:2000} ); t.save( {i:i} ); }' ); + +for( i = 0; i < 1000; ++i ) { + assert.eq( 500, t.find( {i:{$gte:0,$mod:[2,1]}} ).hint( {i:1} ).itcount() ); +} + +s(); diff --git a/jstests/rename.js b/jstests/rename.js index 3ace968..d475cc6 100644 --- a/jstests/rename.js +++ b/jstests/rename.js @@ -31,17 +31,24 @@ a.drop(); b.drop(); c.drop(); -db.createCollection( "jstests_rename_a", {capped:true,size:100} ); -for( i = 0; i < 10; ++i ) { +// TODO: too many numbers hard coded here +// this test depends precisely on record size and hence may not be very reliable +// note we use floats to make sure numbers are represented as doubles for both SM and v8, since test relies on record size +db.createCollection( "jstests_rename_a", {capped:true,size:10000} ); +for( i = 0.1; i < 10; ++i ) { a.save( { i: i } ); } assert.commandWorked( admin.runCommand( {renameCollection:"test.jstests_rename_a", to:"test.jstests_rename_b"} ) ); -assert.eq( 1, b.count( {i:9} ) ); -for( i = 10; i < 20; ++i ) { +assert.eq( 1, b.count( {i:9.1} ) ); +for( i = 10.1; i < 250; ++i ) { b.save( { i: i } ); } -assert.eq( 0, b.count( {i:9} ) ); -assert.eq( 1, b.count( {i:19} ) ); + +//res = b.find().sort({i:1}); +//while (res.hasNext()) printjson(res.next()); + +assert.eq( 0, b.count( {i:9.1} ) ); +assert.eq( 1, b.count( {i:19.1} ) ); assert( db.system.namespaces.findOne( {name:"test.jstests_rename_b" } ) ); assert( !db.system.namespaces.findOne( {name:"test.jstests_rename_a" } ) ); diff --git a/jstests/repl/basic1.js b/jstests/repl/basic1.js index 15fc983..4a6091d 100644 --- a/jstests/repl/basic1.js +++ b/jstests/repl/basic1.js @@ -60,7 +60,7 @@ r = function( key , v ){ correct = { a : 2 , b : 1 }; function checkMR( t ){ - var res = t.mapReduce( m , r , "basic1_out" ); + var res = t.mapReduce( m , r , { out : { inline : 1 } } ) assert.eq( correct , res.convertToSingleObject() , "checkMR: " + tojson( t ) ); } @@ -148,6 +148,23 @@ x = { _id : 1 , x : 1 } assert.eq( x , am.mu1.findOne() , "mu1" ); assert.soon( function(){ z = as.mu1.findOne(); printjson( z ); return friendlyEqual( x , z ); } , "mu2" ) +// profiling - this should be last + +am.setProfilingLevel( 2 ) +am.foo.insert( { x : 1 } ) +am.foo.findOne() +block(); +assert.eq( 2 , am.system.profile.count() , "P1" ) +assert.eq( 0 , as.system.profile.count() , "P2" ) + +assert.eq( 1 , as.foo.findOne().x , "P3" ); +assert.eq( 0 , as.system.profile.count() , "P4" ) + +assert( as.getCollectionNames().indexOf( "system.profile" ) < 0 , "P4.5" ) + +as.setProfilingLevel(2) +as.foo.findOne(); +assert.eq( 1 , as.system.profile.count() , "P5" ) rt.stop(); diff --git a/jstests/repl/dbcase.js b/jstests/repl/dbcase.js new file mode 100644 index 0000000..10a5a61 --- /dev/null +++ b/jstests/repl/dbcase.js @@ -0,0 +1,95 @@ +// Test db case checking with replication SERVER-2111 + +baseName = "jstests_repl_dbcase"; + +rt = new ReplTest( baseName ); + +m = rt.start( true ); +s = rt.start( false ); + +n1 = "dbname"; +n2 = "dbNAme"; + +/** + * The value of n should be n1 or n2. Check that n is soon present while its + * opposite is not present. + */ +function check( n ) { + assert.soon( function() { + try { + // Our db name changes may trigger an exception - SERVER-3189. 
+ names = s.getDBNames(); + } catch (e) { + return false; + } + n1Idx = names.indexOf( n1 ); + n2Idx = names.indexOf( n2 ); + if ( n1Idx != -1 && n2Idx != -1 ) { + // n1 and n2 may both be reported as present transiently. + return false; + } + // Return true if we matched expected n. + return -1 != names.indexOf( n ); + } ); +} + +/** Allow some time for additional operations to be processed by the slave. */ +function checkTwice( n ) { + check( n ); + // zzz is expected to be cloned after n1 and n2 because of its position in the alphabet. + m.getDB( "zzz" ).c.save( {} ); + assert.soon( function() { return s.getDB( "zzz" ).c.count(); } ) + check( n ); + m.getDB( "zzz" ).dropDatabase(); +} + +/** + * The slave may create in memory db names on the master matching old dbs it is + * attempting to clone. This function forces operation 'cmd' by deleting those + * in memory dbs if necessary. This function should only be called in cases where + * 'cmd' would succeed if not for the in memory dbs on master created by the slave. + */ +function force( cmd ) { + print( "cmd: " + cmd ); + eval( cmd ); + while( m1.getLastError() ) { + sleep( 100 ); + m1.dropDatabase(); + m2.dropDatabase(); + eval( cmd ); + } +} + +m1 = m.getDB( n1 ); +m2 = m.getDB( n2 ); + +m1.c.save( {} ); +m2.c.save( {} ); // will fail due to conflict +check( n1 ); + +m1.dropDatabase(); +force( "m2.c.save( {} );" ); // will now succeed +check( n2 ); + +m2.dropDatabase(); +force( "m1.c.save( {} );" ); +check( n1 ); + +for( i = 0; i < 5; ++i ) { + m1.dropDatabase(); + force( "m2.c.save( {} );" ); + m2.dropDatabase(); + force( "m1.c.save( {} );" ); +} +checkTwice( n1 ); + +m1.dropDatabase(); +force( "m2.c.save( {} );" ); + +for( i = 0; i < 5; ++i ) { + m2.dropDatabase(); + force( "m1.c.save( {} );" ); + m1.dropDatabase(); + force( "m2.c.save( {} );" ); +} +checkTwice( n2 ); diff --git a/jstests/repl/drop_dups.js b/jstests/repl/drop_dups.js new file mode 100644 index 0000000..100f469 --- /dev/null +++ b/jstests/repl/drop_dups.js @@ -0,0 +1,68 @@ + +var rt = new ReplTest( "drop_dups" ); + +m = rt.start( true ); +s = rt.start( false ); + +function block(){ + am.runCommand( { getlasterror : 1 , w : 2 , wtimeout : 3000 } ) +} + +am = m.getDB( "foo" ); +as = s.getDB( "foo" ); + +function run( createInBackground ) { + + collName = "foo" + ( createInBackground ? 
"B" : "F" ); + + am[collName].drop(); + am.blah.insert( { x : 1 } ) + assert.soon( function(){ + block(); + return as.blah.findOne(); + } + ); + + + for ( i=0; i<10; i++ ) { + am[collName].insert( { _id : i , x : Math.floor( i / 2 ) } ) + } + + block(); + + am.runCommand( { "godinsert" : collName , obj : { _id : 100 , x : 20 } } ); + am.runCommand( { "godinsert" : collName , obj : { _id : 101 , x : 20 } } ); + + as.runCommand( { "godinsert" : collName , obj : { _id : 101 , x : 20 } } ); + as.runCommand( { "godinsert" : collName , obj : { _id : 100 , x : 20 } } ); + + assert.eq( as[collName].count() , am[collName].count() ); + + function mymap(z) { + return z._id + ":" + z.x + ","; + } + + + if ( am.serverStatus().mem.bits == 64 ) { + assert.neq( tojson(am[collName].find().map(mymap)) , + tojson(as[collName].find().map(mymap)) , "order is not supposed to be same on master and slave but it is" ); + } + + + am[collName].ensureIndex( { x : 1 } , { unique : true , dropDups : true , background : createInBackground } ); + am.blah.insert( { x : 1 } ) + block(); + + assert.eq( 2 , am[collName].getIndexKeys().length , "A1 : " + createInBackground ) + assert.eq( 2 , as[collName].getIndexKeys().length , "A2 : " + createInBackground ) + + assert.eq( am[collName].find().sort( { _id : 1 } ).map(mymap) , + as[collName].find().sort( { _id : 1 } ).map(mymap) , "different things dropped on master and slave" ); + + +} + +run( false ) +run( true ) + +rt.stop() diff --git a/jstests/repl/mastermaster1.js b/jstests/repl/mastermaster1.js index 4932d5a..97fdc14 100644 --- a/jstests/repl/mastermaster1.js +++ b/jstests/repl/mastermaster1.js @@ -4,32 +4,45 @@ ports = allocatePorts( 2 ) left = startMongodTest( ports[0] , "mastermaster1left" , false , { master : "" , slave : "" , source : "127.0.0.1:" + ports[1] } ) -right = startMongodTest( ports[1] , "mastermaster1left" , false , { master : "" , slave : "" , source : "127.0.0.1:" + ports[0] } ) - -print( "check 1" ) x = left.getDB( "admin" ).runCommand( "ismaster" ) assert( x.ismaster , "left: " + tojson( x ) ) +right = startMongodTest( ports[1] , "mastermaster1right" , false , { master : "" , slave : "" , source : "127.0.0.1:" + ports[0] } ) + x = right.getDB( "admin" ).runCommand( "ismaster" ) assert( x.ismaster , "right: " + tojson( x ) ) +print( "check 1" ) + + ldb = left.getDB( "test" ) rdb = right.getDB( "test" ) print( "check 2" ) ldb.foo.insert( { _id : 1 , x : "eliot" } ) -var result = ldb.runCommand( { getlasterror : 1 , w : 2 , wtimeout : 20000 } ); +result = ldb.runCommand( { getlasterror : 1 , w : 2 , wtimeout : 40000 } ); printjson(result); rdb.foo.insert( { _id : 2 , x : "sara" } ) -result = rdb.runCommand( { getlasterror : 1 , w : 2 , wtimeout : 20000 } ) +result = rdb.runCommand( { getlasterror : 1 , w : 2 , wtimeout : 40000 } ) printjson(result); print( "check 3" ) +print( "left" ) +ldb.foo.find().forEach( printjsononeline ) +print( "right" ) +rdb.foo.find().forEach( printjsononeline ) + +print( "oplog" ) + +rdb.getSisterDB( "local" ).getCollection( "oplog.$main" ).find().forEach( printjsononeline ) + +/* assert.eq( 2 , ldb.foo.count() , "B1" ) assert.eq( 2 , rdb.foo.count() , "B2" ) +*/ print( "going to stop everything" ) diff --git a/jstests/repl/mod_move.js b/jstests/repl/mod_move.js new file mode 100644 index 0000000..d39e747 --- /dev/null +++ b/jstests/repl/mod_move.js @@ -0,0 +1,69 @@ + +// test repl basics +// data on master/slave is the same + +var rt = new ReplTest( "mod_move" ); + +m = rt.start( true , { oplogSize : 50 } ); + +function 
block(){ + am.runCommand( { getlasterror : 1 , w : 2 , wtimeout : 3000 } ) +} + +am = m.getDB( "foo" ); + +function check( note ){ + var start = new Date(); + var x,y; + while ( (new Date()).getTime() - start.getTime() < 5 * 60 * 1000 ){ + x = am.runCommand( "dbhash" ); + y = as.runCommand( "dbhash" ); + if ( x.md5 == y.md5 ) + return; + sleep( 200 ); + } + assert.eq( x.md5 , y.md5 , note ); +} + +// insert a lot of 'big' docs +// so when we delete them the small docs move here + +BIG = 100000; +N = BIG * 2; + +s = "asdasdasdasdasdasdasdadasdadasdadasdasdas"; + +for ( i=N; i>=BIG; i-- ) { + am.a.update( { _id : i } , { $set : { x : 1 } } ) + if ( i == N ) { + am.getLastError() + assert.lt( as.a.count() , BIG , "B1" ) + print( "NOW : " + as.a.count() ) + } +} + +check( "B" ) + +rt.stop(); + + + + diff --git a/jstests/repl/pair1.js b/jstests/repl/pair1.js deleted file mode 100644 index 84dd7b7..0000000 --- a/jstests/repl/pair1.js +++ /dev/null @@ -1,100 +0,0 @@ -// Basic pairing test - -var baseName = "jstests_pair1test"; - -debug = function( p ) { -// print( p ); -} - -ismaster = function( n ) { - var im = n.getDB( "admin" ).runCommand( { "ismaster" : 1 } ); -// print( "ismaster: " + tojson( im ) ); - assert( im, "command ismaster failed" ); - return im.ismaster; -} - -var writeOneIdx = 0; - -writeOne = function( n ) { - n.getDB( baseName ).z.save( { _id: new ObjectId(), i: ++writeOneIdx } ); -} - -getCount = function( n ) { - return n.getDB( baseName ).z.find( { i: writeOneIdx } ).toArray().length; -} - -checkWrite = function( m, s ) { - writeOne( m ); - assert.eq( 1, getCount( m ) ); - check( s ); -} - -check = function( s ) { - s.setSlaveOk(); - assert.soon( function() { - return 1 == getCount( s ); - } ); - sleep( 500 ); // wait for sync clone to finish up -} - -// check that slave reads and writes are guarded -checkSlaveGuard = function( s ) { - var t = s.getDB( baseName + "-temp" ).temp; - assert.throws( t.find().count, [], "not master" ); - assert.throws( t.find(), [], "not master", "find did not assert" ); - - checkError = function() { - assert.eq( "not master", s.getDB( "admin" ).getLastError() ); - s.getDB( "admin" ).resetError(); - } - s.getDB( "admin" ).resetError(); - t.save( {x:1} ); - checkError(); - t.update( {}, {x:2}, true ); - checkError(); - t.remove( {x:0} ); - checkError(); -} - -doTest = function( signal ) { - - ports = allocatePorts( 3 ); - - a = new MongodRunner( ports[ 0 ], "/data/db/" + baseName + "-arbiter" ); - l = new MongodRunner( ports[ 1 ], "/data/db/" + baseName + "-left", "127.0.0.1:" + ports[ 2 ], "127.0.0.1:" + ports[ 0 ] ); - r = new MongodRunner( ports[ 2 ], "/data/db/" + baseName + "-right", "127.0.0.1:" + ports[ 1 ], "127.0.0.1:" + ports[ 0 ] ); - - rp = new ReplPair( l, r, a ); - rp.start(); - rp.waitForSteadyState(); - - checkSlaveGuard( rp.slave() ); - - checkWrite( rp.master(), rp.slave() ); - - debug( "kill first" ); - rp.killNode( rp.master(), signal ); - rp.waitForSteadyState( [ 1, null ], rp.slave().host ); - writeOne( rp.master() ); - - debug( "restart first" ); - rp.start( true ); - rp.waitForSteadyState(); - check( rp.slave() ); - checkWrite( rp.master(), rp.slave() ); - - debug( "kill second" ); - rp.killNode( rp.master(), signal ); - rp.waitForSteadyState( [ 1, null ], rp.slave().host ); - - debug( "restart second" ); - rp.start( true ); - rp.waitForSteadyState( [ 1, 0 ], rp.master().host ); - checkWrite( rp.master(), rp.slave() ); - - ports.forEach( function( x ) { stopMongod( x ); } ); - -} - -doTest( 15 ); // SIGTERM -doTest( 9 ); // 
SIGKILL diff --git a/jstests/repl/pair2.js b/jstests/repl/pair2.js deleted file mode 100644 index 2491fb2..0000000 --- a/jstests/repl/pair2.js +++ /dev/null @@ -1,71 +0,0 @@ -// Pairing resync - -var baseName = "jstests_pair2test"; - -ismaster = function( n ) { - im = n.getDB( "admin" ).runCommand( { "ismaster" : 1 } ); - assert( im ); - return im.ismaster; -} - -soonCount = function( m, count ) { - assert.soon( function() { -// print( "counting" ); -//// print( "counted: " + l.getDB( baseName ).z.find().count() ); - return m.getDB( baseName ).z.find().count() == count; - } ); -} - -doTest = function( signal ) { - - ports = allocatePorts( 3 ); - - a = new MongodRunner( ports[ 0 ], "/data/db/" + baseName + "-arbiter" ); - l = new MongodRunner( ports[ 1 ], "/data/db/" + baseName + "-left", "127.0.0.1:" + ports[ 2 ], "127.0.0.1:" + ports[ 0 ] ); - r = new MongodRunner( ports[ 2 ], "/data/db/" + baseName + "-right", "127.0.0.1:" + ports[ 1 ], "127.0.0.1:" + ports[ 0 ] ); - - rp = new ReplPair( l, r, a ); - rp.start(); - rp.waitForSteadyState(); - - rp.slave().setSlaveOk(); - mz = rp.master().getDB( baseName ).z; - - mz.save( { _id: new ObjectId() } ); - soonCount( rp.slave(), 1 ); - assert.eq( 0, rp.slave().getDB( "admin" ).runCommand( { "resync" : 1 } ).ok ); - - sleep( 3000 ); // allow time to finish clone and save ReplSource - rp.killNode( rp.slave(), signal ); - rp.waitForSteadyState( [ 1, null ], rp.master().host ); - - big = new Array( 2000 ).toString(); - for( i = 0; i < 1000; ++i ) - mz.save( { _id: new ObjectId(), i: i, b: big } ); - - rp.start( true ); - rp.waitForSteadyState( [ 1, 0 ], rp.master().host ); - - sleep( 15000 ); - - rp.slave().setSlaveOk(); - assert.soon( function() { - ret = rp.slave().getDB( "admin" ).runCommand( { "resync" : 1 } ); -// printjson( ret ); - return 1 == ret.ok; - } ); - - sleep( 8000 ); - soonCount( rp.slave(), 1001 ); - sz = rp.slave().getDB( baseName ).z - assert.eq( 1, sz.find( { i: 0 } ).count() ); - assert.eq( 1, sz.find( { i: 999 } ).count() ); - - assert.eq( 0, rp.slave().getDB( "admin" ).runCommand( { "resync" : 1 } ).ok ); - - ports.forEach( function( x ) { stopMongod( x ); } ); - -} - -doTest( 15 ); // SIGTERM -doTest( 9 ); // SIGKILL diff --git a/jstests/repl/pair3.js b/jstests/repl/pair3.js deleted file mode 100644 index d5fdf7e..0000000 --- a/jstests/repl/pair3.js +++ /dev/null @@ -1,245 +0,0 @@ -// test arbitration - -var baseName = "jstests_pair3test"; - -ismaster = function( n ) { - var im = n.getDB( "admin" ).runCommand( { "ismaster" : 1 } ); - print( "ismaster: " + tojson( im ) ); - assert( im, "command ismaster failed" ); - return im.ismaster; -} - -// bring up node connections before arbiter connections so that arb can forward to node when expected -connect = function() { - if ( lp == null ) { - print("connecting lp"); - lp = startMongoProgram( "mongobridge", "--port", lpPort, "--dest", "localhost:" + lPort ); - } - if ( rp == null ) { - print("connecting rp"); - rp = startMongoProgram( "mongobridge", "--port", rpPort, "--dest", "localhost:" + rPort ); - } - if ( al == null ) { - print("connecting al"); - al = startMongoProgram( "mongobridge", "--port", alPort, "--dest", "localhost:" + aPort ); - } - if ( ar == null ) { - print("connecting ar"); - ar = startMongoProgram( "mongobridge", "--port", arPort, "--dest", "localhost:" + aPort ); - } -} - -disconnectNode = function( mongo ) { - if ( lp ) { - print("disconnecting lp: "+lpPort); - stopMongoProgram( lpPort ); - lp = null; - } - if ( rp ) { - print("disconnecting rp: "+rpPort); - 
stopMongoProgram( rpPort ); - rp = null; - } - if ( mongo.host.match( new RegExp( "^127.0.0.1:" + lPort + "$" ) ) ) { - print("disconnecting al: "+alPort); - stopMongoProgram( alPort ); - al = null; - } else if ( mongo.host.match( new RegExp( "^127.0.0.1:" + rPort + "$" ) ) ) { - print("disconnecting ar: "+arPort); - stopMongoProgram( arPort ); - ar = null; - } else { - assert( false, "don't know how to disconnect node: " + mongo ); - } -} - -doTest1 = function() { - al = ar = lp = rp = null; - ports = allocatePorts( 7 ); - aPort = ports[ 0 ]; - alPort = ports[ 1 ]; - arPort = ports[ 2 ]; - lPort = ports[ 3 ]; - lpPort = ports[ 4 ]; - rPort = ports[ 5 ]; - rpPort = ports[ 6 ]; - - connect(); - - a = new MongodRunner( aPort, "/data/db/" + baseName + "-arbiter" ); - l = new MongodRunner( lPort, "/data/db/" + baseName + "-left", "127.0.0.1:" + rpPort, "127.0.0.1:" + alPort ); - r = new MongodRunner( rPort, "/data/db/" + baseName + "-right", "127.0.0.1:" + lpPort, "127.0.0.1:" + arPort ); - - pair = new ReplPair( l, r, a ); - - print("normal startup"); - pair.start(); - pair.waitForSteadyState(); - - print("disconnect slave"); - disconnectNode( pair.slave() ); - pair.waitForSteadyState( [ 1, -3 ], pair.master().host ); - - print("disconnect master"); - disconnectNode( pair.master() ); - pair.waitForSteadyState( [ -3, -3 ] ); - - print("reconnect"); - connect(); - pair.waitForSteadyState(); - - print("disconnect master"); - disconnectNode( pair.master() ); - pair.waitForSteadyState( [ 1, -3 ], pair.slave().host, true ); - - print("disconnect new master"); - disconnectNode( pair.master() ); - pair.waitForSteadyState( [ -3, -3 ] ); - - print("reconnect"); - connect(); - pair.waitForSteadyState(); - - print("disconnect slave"); - disconnectNode( pair.slave() ); - pair.waitForSteadyState( [ 1, -3 ], pair.master().host ); - - print("reconnect slave"); - connect(); - pair.waitForSteadyState( [ 1, 0 ], pair.master().host ); - - print("disconnect master"); - disconnectNode( pair.master() ); - pair.waitForSteadyState( [ 1, -3 ], pair.slave().host, true ); - - print("reconnect old master"); - connect(); - pair.waitForSteadyState( [ 1, 0 ], pair.master().host ); - - ports.forEach( function( x ) { stopMongoProgram( x ); } ); -} - -// this time don't start connected -doTest2 = function() { - al = ar = lp = rp = null; - ports = allocatePorts( 7 ); - aPort = ports[ 0 ]; - alPort = ports[ 1 ]; - arPort = ports[ 2 ]; - lPort = ports[ 3 ]; - lpPort = ports[ 4 ]; - rPort = ports[ 5 ]; - rpPort = ports[ 6 ]; - - a = new MongodRunner( aPort, "/data/db/" + baseName + "-arbiter" ); - l = new MongodRunner( lPort, "/data/db/" + baseName + "-left", "127.0.0.1:" + rpPort, "127.0.0.1:" + alPort ); - r = new MongodRunner( rPort, "/data/db/" + baseName + "-right", "127.0.0.1:" + lpPort, "127.0.0.1:" + arPort ); - - pair = new ReplPair( l, r, a ); - pair.start(); - pair.waitForSteadyState( [ -3, -3 ] ); - - startMongoProgram( "mongobridge", "--port", arPort, "--dest", "localhost:" + aPort ); - - // there hasn't been an initial sync, no no node will become master - - for( i = 0; i < 10; ++i ) { - assert( pair.isMaster( pair.right() ) == -3 && pair.isMaster( pair.left() ) == -3 ); - sleep( 500 ); - } - - stopMongoProgram( arPort ); - - startMongoProgram( "mongobridge", "--port", alPort, "--dest", "localhost:" + aPort ); - - for( i = 0; i < 10; ++i ) { - assert( pair.isMaster( pair.right() ) == -3 && pair.isMaster( pair.left() ) == -3 ); - sleep( 500 ); - } - - stopMongoProgram( alPort ); - - // connect l and r without a - - 
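// The fixed-iteration loops above verify a negative: with no completed
// initial sync, neither node may ever report itself master, so the test polls
// a fixed stability window rather than using assert.soon. A reusable sketch
// of that shape (the helper name and timings are illustrative, not part of
// the original test):
assertStaysTrue = function( check, iterations, delayMs ) {
    for ( var i = 0; i < iterations; ++i ) {
        assert( check(), "condition did not hold for the whole window" );
        sleep( delayMs );
    }
};
assertStaysTrue( function() { return pair.isMaster( pair.left() ) == -3; }, 10, 500 );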
startMongoProgram( "mongobridge", "--port", lpPort, "--dest", "localhost:" + lPort ); - startMongoProgram( "mongobridge", "--port", rpPort, "--dest", "localhost:" + rPort ); - - pair.waitForSteadyState( [ 1, 0 ] ); - - ports.forEach( function( x ) { stopMongoProgram( x ); } ); -} - -// recover from master - master setup -doTest3 = function() { - al = ar = lp = rp = null; - ports = allocatePorts( 7 ); - aPort = ports[ 0 ]; - alPort = ports[ 1 ]; - arPort = ports[ 2 ]; - lPort = ports[ 3 ]; - lpPort = ports[ 4 ]; - rPort = ports[ 5 ]; - rpPort = ports[ 6 ]; - - connect(); - - a = new MongodRunner( aPort, "/data/db/" + baseName + "-arbiter" ); - l = new MongodRunner( lPort, "/data/db/" + baseName + "-left", "127.0.0.1:" + rpPort, "127.0.0.1:" + alPort ); - r = new MongodRunner( rPort, "/data/db/" + baseName + "-right", "127.0.0.1:" + lpPort, "127.0.0.1:" + arPort ); - - pair = new ReplPair( l, r, a ); - pair.start(); - pair.waitForSteadyState(); - - // now can only talk to arbiter - stopMongoProgram( lpPort ); - stopMongoProgram( rpPort ); - pair.waitForSteadyState( [ 1, 1 ], null, true ); - - // recover - startMongoProgram( "mongobridge", "--port", lpPort, "--dest", "localhost:" + lPort ); - startMongoProgram( "mongobridge", "--port", rpPort, "--dest", "localhost:" + rPort ); - pair.waitForSteadyState( [ 1, 0 ], null, true ); - - ports.forEach( function( x ) { stopMongoProgram( x ); } ); -} - -// check that initial sync is persistent -doTest4 = function( signal ) { - al = ar = lp = rp = null; - ports = allocatePorts( 7 ); - aPort = ports[ 0 ]; - alPort = ports[ 1 ]; - arPort = ports[ 2 ]; - lPort = ports[ 3 ]; - lpPort = ports[ 4 ]; - rPort = ports[ 5 ]; - rpPort = ports[ 6 ]; - - connect(); - - a = new MongodRunner( aPort, "/data/db/" + baseName + "-arbiter" ); - l = new MongodRunner( lPort, "/data/db/" + baseName + "-left", "127.0.0.1:" + rpPort, "127.0.0.1:" + alPort ); - r = new MongodRunner( rPort, "/data/db/" + baseName + "-right", "127.0.0.1:" + lpPort, "127.0.0.1:" + arPort ); - - pair = new ReplPair( l, r, a ); - pair.start(); - pair.waitForSteadyState(); - - pair.killNode( pair.left(), signal ); - pair.killNode( pair.right(), signal ); - stopMongoProgram( rpPort ); - stopMongoProgram( lpPort ); - - // now can only talk to arbiter - pair.start( true ); - pair.waitForSteadyState( [ 1, 1 ], null, true ); - - ports.forEach( function( x ) { stopMongoProgram( x ); } ); -} - -doTest1(); -doTest2(); -doTest3(); -doTest4( 15 ); -doTest4( 9 ); diff --git a/jstests/repl/pair4.js b/jstests/repl/pair4.js deleted file mode 100644 index c04433e..0000000 --- a/jstests/repl/pair4.js +++ /dev/null @@ -1,160 +0,0 @@ -// data consistency after master-master - -var baseName = "jstests_pair4test"; - -debug = function( o ) { - printjson( o ); -} - -ismaster = function( n ) { - var im = n.getDB( "admin" ).runCommand( { "ismaster" : 1 } ); - print( "ismaster: " + tojson( im ) ); - assert( im, "command ismaster failed" ); - return im.ismaster; -} - -connect = function() { - startMongoProgram( "mongobridge", "--port", lpPort, "--dest", "localhost:" + lPort ); - startMongoProgram( "mongobridge", "--port", rpPort, "--dest", "localhost:" + rPort ); -} - -disconnect = function() { - stopMongoProgram( lpPort ); - stopMongoProgram( rpPort ); -} - -write = function( m, n, id ) { - if ( id ) { - save = { _id:id, n:n }; - } else { - save = { n:n }; - } - m.getDB( baseName ).getCollection( baseName ).save( save ); -} - -check = function( m, n, id ) { - m.setSlaveOk(); - if ( id ) { - find = { _id:id, n:n }; - } else { 
- find = { n:n }; - } - assert.soon( function() { return m.getDB( baseName ).getCollection( baseName ).find( find ).count() > 0; }, - "failed waiting for " + m + " value of n to be " + n ); -} - -checkCount = function( m, c ) { - m.setSlaveOk(); - assert.soon( function() { - actual = m.getDB( baseName ).getCollection( baseName ).find().count(); - print( actual ); - return c == actual; }, - "count failed for " + m ); -} - -coll = function( m ) { - return m.getDB( baseName ).getCollection( baseName ); -} - -db2Coll = function( m ) { - return m.getDB( baseName + "_second" ).getCollection( baseName ); -} - -doTest = function( recover, newMaster, newSlave ) { - ports = allocatePorts( 5 ); - aPort = ports[ 0 ]; - lPort = ports[ 1 ]; - lpPort = ports[ 2 ]; - rPort = ports[ 3 ]; - rpPort = ports[ 4 ]; - - // start normally - connect(); - a = new MongodRunner( aPort, "/data/db/" + baseName + "-arbiter" ); - l = new MongodRunner( lPort, "/data/db/" + baseName + "-left", "127.0.0.1:" + rpPort, "127.0.0.1:" + aPort ); - r = new MongodRunner( rPort, "/data/db/" + baseName + "-right", "127.0.0.1:" + lpPort, "127.0.0.1:" + aPort ); - pair = new ReplPair( l, r, a ); - pair.start(); - pair.waitForSteadyState(); - - firstMaster = pair.master(); - firstSlave = pair.slave(); - - write( pair.master(), 0 ); - write( pair.master(), 1 ); - check( pair.slave(), 0 ); - check( pair.slave(), 1 ); - - // now each can only talk to arbiter - disconnect(); - pair.waitForSteadyState( [ 1, 1 ], null, true ); - - m = newMaster(); - write( m, 10 ); - write( m, 100, "a" ); - coll( m ).update( {n:1}, {$set:{n:2}} ); - db2Coll( m ).save( {n:500} ); - db2Coll( m ).findOne(); - - s = newSlave(); - write( s, 20 ); - write( s, 200, "a" ); - coll( s ).update( {n:1}, {n:1,m:3} ); - db2Coll( s ).save( {_id:"a",n:600} ); - db2Coll( s ).findOne(); - - // recover - recover(); - - nodes = [ pair.right(), pair.left() ]; - - nodes.forEach( function( x ) { checkCount( x, 5 ); } ); - nodes.forEach( function( x ) { [ 0, 10, 20, 100 ].forEach( function( y ) { check( x, y ); } ); } ); - - checkM = function( c ) { - assert.soon( function() { - obj = coll( c ).findOne( {n:2} ); - printjson( obj ); - return obj.m == undefined; - }, "n:2 test for " + c + " failed" ); - }; - nodes.forEach( function( x ) { checkM( x ); } ); - - // check separate database - nodes.forEach( function( x ) { assert.soon( function() { - r = db2Coll( x ).findOne( {_id:"a"} ); - debug( r ); - if ( r == null ) { - return false; - } - return 600 == r.n; - } ) } ); - - ports.forEach( function( x ) { stopMongoProgram( x ); } ); - -} - -// right will be master on recovery b/c both sides will have completed initial sync -debug( "basic test" ); -doTest( function() { - connect(); - pair.waitForSteadyState( [ 1, 0 ], pair.right().host, true ); - }, function() { return pair.right(); }, function() { return pair.left(); } ); - -doRestartTest = function( signal ) { - doTest( function() { - if ( signal == 9 ) { - sleep( 3000 ); - } - pair.killNode( firstMaster, signal ); - connect(); - pair.start( true ); - pair.waitForSteadyState( [ 1, 0 ], firstSlave.host, true ); - }, function() { return firstSlave; }, function() { return firstMaster; } ); -} - -debug( "sigterm restart test" ); -doRestartTest( 15 ) // SIGTERM - -debug( "sigkill restart test" ); -doRestartTest( 9 ) // SIGKILL diff --git a/jstests/repl/pair5.js b/jstests/repl/pair5.js deleted file mode 100644 index de7e2d5..0000000 --- a/jstests/repl/pair5.js +++ /dev/null @@ -1,95 +0,0 @@ -// writes to new master while making 
master-master logs consistent - -var baseName = "jstests_pair5test"; - -debug = function( p ) { - print( p ); -} - -ismaster = function( n ) { - var im = n.getDB( "admin" ).runCommand( { "ismaster" : 1 } ); - print( "ismaster: " + tojson( im ) ); - assert( im, "command ismaster failed" ); - return im.ismaster; -} - -connect = function() { - startMongoProgram( "mongobridge", "--port", lpPort, "--dest", "localhost:" + lPort ); - startMongoProgram( "mongobridge", "--port", rpPort, "--dest", "localhost:" + rPort ); -} - -disconnect = function() { - stopMongoProgram( lpPort ); - stopMongoProgram( rpPort ); -} - -write = function( m, n, id ) { - if ( id ) { - save = { _id:id, n:n }; - } else { - save = { n:n }; - } - m.getDB( baseName ).getCollection( baseName ).save( save ); -} - -checkCount = function( m, c ) { - m.setSlaveOk(); - assert.soon( function() { - actual = m.getDB( baseName ).getCollection( baseName ).find().count(); - print( actual ); - return c == actual; }, - "count failed for " + m ); -} - -doTest = function( nSlave, opIdMem ) { - ports = allocatePorts( 5 ); - aPort = ports[ 0 ]; - lPort = ports[ 1 ]; - lpPort = ports[ 2 ]; - rPort = ports[ 3 ]; - rpPort = ports[ 4 ]; - - // start normally - connect(); - a = new MongodRunner( aPort, "/data/db/" + baseName + "-arbiter" ); - l = new MongodRunner( lPort, "/data/db/" + baseName + "-left", "127.0.0.1:" + rpPort, "127.0.0.1:" + aPort ); - r = new MongodRunner( rPort, "/data/db/" + baseName + "-right", "127.0.0.1:" + lpPort, "127.0.0.1:" + aPort ); - pair = new ReplPair( l, r, a ); - pair.start(); - pair.waitForSteadyState(); - - // now each can only talk to arbiter - disconnect(); - pair.waitForSteadyState( [ 1, 1 ], null, true ); - - // left will become slave (b/c both completed initial sync) - for( i = 0; i < nSlave; ++i ) { - write( pair.left(), i, i ); - } - pair.left().getDB( baseName ).getCollection( baseName ).findOne(); - - for( i = 10000; i < 15000; ++i ) { - write( pair.right(), i, i ); - } - pair.right().getDB( baseName ).getCollection( baseName ).findOne(); - - connect(); - pair.waitForSteadyState( [ 1, 0 ], pair.right().host, true ); - - pair.master().getDB( baseName ).getCollection( baseName ).update( {_id:nSlave - 1}, {_id:nSlave - 1,n:-1}, true ); - assert.eq( -1, pair.master().getDB( baseName ).getCollection( baseName ).findOne( {_id:nSlave - 1} ).n ); - checkCount( pair.master(), 5000 + nSlave ); - assert.eq( -1, pair.master().getDB( baseName ).getCollection( baseName ).findOne( {_id:nSlave - 1} ).n ); - pair.slave().setSlaveOk(); - assert.soon( function() { - n = pair.slave().getDB( baseName ).getCollection( baseName ).findOne( {_id:nSlave - 1} ).n; - print( n ); - return -1 == n; - } ); - - ports.forEach( function( x ) { stopMongoProgram( x ); } ); - -} - -doTest( 5000, 100000000 ); -doTest( 5000, 100 ); // force op id converstion to collection based storage diff --git a/jstests/repl/pair6.js b/jstests/repl/pair6.js deleted file mode 100644 index b249fc0..0000000 --- a/jstests/repl/pair6.js +++ /dev/null @@ -1,115 +0,0 @@ -// pairing cases where oplogs run out of space - -var baseName = "jstests_pair6test"; - -debug = function( p ) { - print( p ); -} - -ismaster = function( n ) { - var im = n.getDB( "admin" ).runCommand( { "ismaster" : 1 } ); - print( "ismaster: " + tojson( im ) ); - assert( im, "command ismaster failed" ); - return im.ismaster; -} - -connect = function() { - startMongoProgram( "mongobridge", "--port", lpPort, "--dest", "localhost:" + lPort ); - startMongoProgram( "mongobridge", "--port", rpPort, 
"--dest", "localhost:" + rPort ); -} - -disconnect = function() { - stopMongoProgram( lpPort ); - stopMongoProgram( rpPort ); -} - -checkCount = function( m, c ) { - m.setSlaveOk(); - assert.soon( function() { - actual = m.getDB( baseName ).getCollection( baseName ).find().count(); - print( actual ); - return c == actual; }, - "expected count " + c + " for " + m ); -} - -resetSlave = function( s ) { - s.setSlaveOk(); - assert.soon( function() { - ret = s.getDB( "admin" ).runCommand( { "resync" : 1 } ); - // printjson( ret ); - return 1 == ret.ok; - } ); -} - -big = new Array( 2000 ).toString(); - -doTest = function() { - ports = allocatePorts( 5 ); - aPort = ports[ 0 ]; - lPort = ports[ 1 ]; - lpPort = ports[ 2 ]; - rPort = ports[ 3 ]; - rpPort = ports[ 4 ]; - - // start normally - connect(); - a = new MongodRunner( aPort, "/data/db/" + baseName + "-arbiter" ); - l = new MongodRunner( lPort, "/data/db/" + baseName + "-left", "127.0.0.1:" + rpPort, "127.0.0.1:" + aPort ); - r = new MongodRunner( rPort, "/data/db/" + baseName + "-right", "127.0.0.1:" + lpPort, "127.0.0.1:" + aPort ); - pair = new ReplPair( l, r, a ); - pair.start(); - pair.waitForSteadyState(); - - disconnect(); - pair.waitForSteadyState( [ 1, 1 ], null, true ); - - print( "test one" ); - - // fill new slave oplog - for( i = 0; i < 1000; ++i ) { - pair.left().getDB( baseName ).getCollection( baseName ).save( {b:big} ); - } - pair.left().getDB( baseName ).getCollection( baseName ).findOne(); - - // write single to new master - pair.right().getDB( baseName ).getCollection( baseName ).save( {} ); - - connect(); - pair.waitForSteadyState( [ 1, 0 ], pair.right().host, true ); - - resetSlave( pair.left() ); - - checkCount( pair.left(), 1 ); - checkCount( pair.right(), 1 ); - - pair.right().getDB( baseName ).getCollection( baseName ).remove( {} ); - checkCount( pair.left(), 0 ); - - disconnect(); - pair.waitForSteadyState( [ 1, 1 ], null, true ); - - print( "test two" ); - - // fill new master oplog - for( i = 0; i < 1000; ++i ) { - pair.right().getDB( baseName ).getCollection( baseName ).save( {b:big} ); - } - - pair.left().getDB( baseName ).getCollection( baseName ).save( {_id:"abcde"} ); - - connect(); - pair.waitForSteadyState( [ 1, 0 ], pair.right().host, true ); - - sleep( 15000 ); - - resetSlave( pair.left() ); - - checkCount( pair.left(), 1000 ); - checkCount( pair.right(), 1000 ); - assert.eq( 0, pair.left().getDB( baseName ).getCollection( baseName ).find( {_id:"abcde"} ).count() ); - - ports.forEach( function( x ) { stopMongoProgram( x ); } ); - -} - -doTest(); \ No newline at end of file diff --git a/jstests/repl/pair7.js b/jstests/repl/pair7.js deleted file mode 100644 index 52ef91f..0000000 --- a/jstests/repl/pair7.js +++ /dev/null @@ -1,85 +0,0 @@ -// pairing with auth - -var baseName = "jstests_pair7test"; - -setAdmin = function( n ) { - n.getDB( "admin" ).addUser( "super", "super" ); - n.getDB( "local" ).addUser( "repl", "foo" ); - n.getDB( "local" ).system.users.findOne(); -} - -auth = function( n ) { - return n.getDB( baseName ).auth( "test", "test" ); -} - -doTest = function( signal ) { - - ports = allocatePorts( 3 ); - - m = startMongod( "--port", ports[ 1 ], "--dbpath", "/data/db/" + baseName + "-left", "--nohttpinterface", "--bind_ip", "127.0.0.1" ); - setAdmin( m ); - stopMongod( ports[ 1 ] ); - - m = startMongod( "--port", ports[ 2 ], "--dbpath", "/data/db/" + baseName + "-right", "--nohttpinterface", "--bind_ip", "127.0.0.1" ); - setAdmin( m ); - stopMongod( ports[ 2 ] ); - - a = new MongodRunner( ports[ 0 
], "/data/db/" + baseName + "-arbiter" ); - l = new MongodRunner( ports[ 1 ], "/data/db/" + baseName + "-left", "127.0.0.1:" + ports[ 2 ], "127.0.0.1:" + ports[ 0 ], [ "--auth" ] ); - r = new MongodRunner( ports[ 2 ], "/data/db/" + baseName + "-right", "127.0.0.1:" + ports[ 1 ], "127.0.0.1:" + ports[ 0 ], [ "--auth" ] ); - - rp = new ReplPair( l, r, a ); - rp.start( true ); - rp.waitForSteadyState(); - - rp.master().getDB( "admin" ).auth( "super", "super" ); - rp.master().getDB( baseName ).addUser( "test", "test" ); - auth( rp.master() ); // reauth - assert.soon( function() { return auth( rp.slave() ); } ); - rp.slave().setSlaveOk(); - - ma = rp.master().getDB( baseName ).a; - ma.save( {} ); - sa = rp.slave().getDB( baseName ).a; - assert.soon( function() { return 1 == sa.count(); } ); - - rp.killNode( rp.slave(), signal ); - rp.waitForSteadyState( [ 1, null ] ); - ma.save( {} ); - - rp.start( true ); - rp.waitForSteadyState(); - assert.soon( function() { return auth( rp.slave() ); } ); - rp.slave().setSlaveOk(); - sa = rp.slave().getDB( baseName ).a; - assert.soon( function() { return 2 == sa.count(); } ); - - ma.save( {a:1} ); - assert.soon( function() { return 1 == sa.count( {a:1} ); } ); - - ma.update( {a:1}, {b:2} ); - assert.soon( function() { return 1 == sa.count( {b:2} ); } ); - - ma.remove( {b:2} ); - assert.soon( function() { return 0 == sa.count( {b:2} ); } ); - - rp.killNode( rp.master(), signal ); - rp.waitForSteadyState( [ 1, null ] ); - ma = sa; - ma.save( {} ); - - rp.start( true ); - rp.waitForSteadyState(); - assert.soon( function() { return auth( rp.slave() ); } ); - rp.slave().setSlaveOk(); - sa = rp.slave().getDB( baseName ).a; - assert.soon( function() { return 3 == sa.count(); } ); - - ma.save( {} ); - assert.soon( function() { return 4 == sa.count(); } ); - - ports.forEach( function( x ) { stopMongod( x ); } ); -} - -doTest( 15 ); // SIGTERM -doTest( 9 ); // SIGKILL diff --git a/jstests/repl/repl2.js b/jstests/repl/repl2.js index 42b0caf..b290c61 100644 --- a/jstests/repl/repl2.js +++ b/jstests/repl/repl2.js @@ -1,34 +1,43 @@ // Test resync command soonCount = function( count ) { - assert.soon( function() { + assert.soon( function() { // print( "check count" ); // print( "count: " + s.getDB( baseName ).z.find().count() ); - return s.getDB("foo").a.find().count() == count; - } ); + return s.getDB("foo").a.find().count() == count; + } ); } doTest = function( signal ) { - + print("signal: "+signal); + var rt = new ReplTest( "repl2tests" ); // implicit small oplog makes slave get out of sync - m = rt.start( true ); + m = rt.start( true, { oplogSize : "1" } ); s = rt.start( false ); - + am = m.getDB("foo").a - + am.save( { _id: new ObjectId() } ); soonCount( 1 ); assert.eq( 0, s.getDB( "admin" ).runCommand( { "resync" : 1 } ).ok ); rt.stop( false , signal ); - + big = new Array( 2000 ).toString(); for( i = 0; i < 1000; ++i ) am.save( { _id: new ObjectId(), i: i, b: big } ); s = rt.start( false , null , true ); - assert.soon( function() { return 1 == s.getDB( "admin" ).runCommand( { "resync" : 1 } ).ok; } ); + + print("earliest op in master: "+tojson(m.getDB("local").oplog.$main.find().sort({$natural:1}).limit(1).next())); + print("latest op on slave: "+tojson(s.getDB("local").sources.findOne())); + + assert.soon( function() { + var result = s.getDB( "admin" ).runCommand( { "resync" : 1 } ); + print("resync says: "+tojson(result)); + return result.ok == 1; + } ); soonCount( 1001 ); assert.automsg( "m.getDB( 'local' ).getCollection( 'oplog.$main' ).stats().size > 0" ); @@ 
-36,7 +45,7 @@ doTest = function( signal ) { as = s.getDB("foo").a assert.eq( 1, as.find( { i: 0 } ).count() ); assert.eq( 1, as.find( { i: 999 } ).count() ); - + assert.eq( 0, s.getDB( "admin" ).runCommand( { "resync" : 1 } ).ok ); rt.stop(); diff --git a/jstests/repl/repl3.js b/jstests/repl/repl3.js index d3c3848..5ace9b6 100644 --- a/jstests/repl/repl3.js +++ b/jstests/repl/repl3.js @@ -10,38 +10,42 @@ soonCount = function( count ) { } ); } -doTest = function( signal ) { - - rt = new ReplTest( "repl3tests" ); - - m = rt.start( true ); - s = rt.start( false ); - - am = m.getDB( baseName ).a - - am.save( { _id: new ObjectId() } ); - soonCount( 1 ); - rt.stop( false, signal ); - - big = new Array( 2000 ).toString(); - for( i = 0; i < 1000; ++i ) - am.save( { _id: new ObjectId(), i: i, b: big } ); - - s = rt.start( false, { autoresync: null }, true ); - +doTest = function (signal) { + + print("repl3.js doTest(" + signal + ")") + + rt = new ReplTest("repl3tests"); + + m = rt.start(true); + s = rt.start(false); + + am = m.getDB(baseName).a + + am.save({ _id: new ObjectId() }); + soonCount(1); + rt.stop(false, signal); + + big = new Array(2000).toString(); + for (i = 0; i < 1000; ++i) + am.save({ _id: new ObjectId(), i: i, b: big }); + + s = rt.start(false, { autoresync: null }, true); + // after SyncException, mongod waits 10 secs. - sleep( 15000 ); - + sleep(15000); + // Need the 2 additional seconds timeout, since commands don't work on an 'allDead' node. - soonCount( 1001 ); - as = s.getDB( baseName ).a - assert.eq( 1, as.find( { i: 0 } ).count() ); - assert.eq( 1, as.find( { i: 999 } ).count() ); - - assert.commandFailed( s.getDB( "admin" ).runCommand( { "resync" : 1 } ) ); + soonCount(1001); + as = s.getDB(baseName).a + assert.eq(1, as.find({ i: 0 }).count()); + assert.eq(1, as.find({ i: 999 }).count()); + + assert.commandFailed(s.getDB("admin").runCommand({ "resync": 1 })); rt.stop(); } doTest( 15 ); // SIGTERM doTest( 9 ); // SIGKILL + +print("repl3.js OK") diff --git a/jstests/repl/replacePeer1.js b/jstests/repl/replacePeer1.js deleted file mode 100644 index b3743ce..0000000 --- a/jstests/repl/replacePeer1.js +++ /dev/null @@ -1,82 +0,0 @@ -// test replace peer on master - -var baseName = "jstests_replacepeer1test"; - -ismaster = function( n ) { - im = n.getDB( "admin" ).runCommand( { "ismaster" : 1 } ); -// print( "ismaster: " + tojson( im ) ); - assert( im ); - return im.ismaster; -} - -var writeOneIdx = 0; - -writeOne = function( n ) { - n.getDB( baseName ).z.save( { _id: new ObjectId(), i: ++writeOneIdx } ); -} - -getCount = function( n ) { - return n.getDB( baseName ).z.find( { i: writeOneIdx } ).toArray().length; -} - -checkWrite = function( m, s ) { - writeOne( m ); - assert.eq( 1, getCount( m ) ); - s.setSlaveOk(); - assert.soon( function() { - return 1 == getCount( s ); - } ); -} - -doTest = function( signal ) { - - ports = allocatePorts( 4 ); - - a = new MongodRunner( ports[ 0 ], "/data/db/" + baseName + "-arbiter" ); - l = new MongodRunner( ports[ 1 ], "/data/db/" + baseName + "-left", "127.0.0.1:" + ports[ 3 ], "127.0.0.1:" + ports[ 0 ] ); - r = new MongodRunner( ports[ 3 ], "/data/db/" + baseName + "-right", "127.0.0.1:" + ports[ 1 ], "127.0.0.1:" + ports[ 0 ] ); - - rp = new ReplPair( l, r, a ); - rp.start(); - rp.waitForSteadyState( [ 1, 0 ] ); - rightMaster = ( rp.master().host == rp.right().host ); - - checkWrite( rp.master(), rp.slave() ); - - rp.killNode( rp.slave(), signal ); - - writeOne( rp.master() ); - - assert.commandWorked( rp.master().getDB( "admin" 
).runCommand( {replacepeer:1} ) ); - - rp.killNode( rp.master(), signal ); - rp.killNode( rp.arbiter(), signal ); - - if ( rightMaster ) { - o = new MongodRunner( ports[ 2 ], "/data/db/" + baseName + "-left", "127.0.0.1:" + ports[ 3 ], "127.0.0.1:" + ports[ 0 ] ); - r = new MongodRunner( ports[ 3 ], "/data/db/" + baseName + "-right", "127.0.0.1:" + ports[ 2 ], "127.0.0.1:" + ports[ 0 ] ); - rp = new ReplPair( o, r, a ); - resetDbpath( "/data/db/" + baseName + "-left" ); - } else { - l = new MongodRunner( ports[ 1 ], "/data/db/" + baseName + "-left", "127.0.0.1:" + ports[ 2 ], "127.0.0.1:" + ports[ 0 ] ); - o = new MongodRunner( ports[ 2 ], "/data/db/" + baseName + "-right", "127.0.0.1:" + ports[ 1 ], "127.0.0.1:" + ports[ 0 ] ); - rp = new ReplPair( l, o, a ); - resetDbpath( "/data/db/" + baseName + "-right" ); - } - - rp.start( true ); - rp.waitForSteadyState( [ 1, 0 ] ); - - rp.slave().setSlaveOk(); - assert.eq( 2, rp.master().getDB( baseName ).z.find().toArray().length ); - assert.eq( 2, rp.slave().getDB( baseName ).z.find().toArray().length ); - - checkWrite( rp.master(), rp.slave() ); - assert.eq( 3, rp.slave().getDB( baseName ).z.find().toArray().length ); - - ports.forEach( function( x ) { stopMongod( x ); } ); - -} - -doTest( 15 ); // SIGTERM -doTest( 9 ); // SIGKILL diff --git a/jstests/repl/replacePeer2.js b/jstests/repl/replacePeer2.js deleted file mode 100644 index 33b054a..0000000 --- a/jstests/repl/replacePeer2.js +++ /dev/null @@ -1,86 +0,0 @@ -// test replace peer on slave - -var baseName = "jstests_replacepeer2test"; - -ismaster = function( n ) { - im = n.getDB( "admin" ).runCommand( { "ismaster" : 1 } ); -// print( "ismaster: " + tojson( im ) ); - assert( im ); - return im.ismaster; -} - -var writeOneIdx = 0; - -writeOne = function( n ) { - n.getDB( baseName ).z.save( { _id: new ObjectId(), i: ++writeOneIdx } ); -} - -getCount = function( n ) { - return n.getDB( baseName ).z.find( { i: writeOneIdx } ).toArray().length; -} - -checkWrite = function( m, s ) { - writeOne( m ); - assert.eq( 1, getCount( m ) ); - s.setSlaveOk(); - assert.soon( function() { - return 1 == getCount( s ); - } ); -} - -doTest = function( signal ) { - - ports = allocatePorts( 4 ); - - a = new MongodRunner( ports[ 0 ], "/data/db/" + baseName + "-arbiter" ); - l = new MongodRunner( ports[ 1 ], "/data/db/" + baseName + "-left", "127.0.0.1:" + ports[ 3 ], "127.0.0.1:" + ports[ 0 ] ); - r = new MongodRunner( ports[ 3 ], "/data/db/" + baseName + "-right", "127.0.0.1:" + ports[ 1 ], "127.0.0.1:" + ports[ 0 ] ); - - rp = new ReplPair( l, r, a ); - rp.start(); - rp.waitForSteadyState( [ 1, 0 ] ); - leftSlave = ( rp.slave().host == rp.left().host ); - - checkWrite( rp.master(), rp.slave() ); - - // allow slave to finish initial sync - var res = rp.slave().getDB( "admin" ).runCommand( {replacepeer:1} ); - assert( res.ok , "replacepeer didn't finish: " + tojson( res ) ); - - // Should not be saved to slave. - writeOne( rp.master() ); - // Make sure there would be enough time to save to l if we hadn't called replacepeer. 
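// A negative replication check cannot use assert.soon, since there is no
// event to wait for; these tests instead write, sleep past the normal sync
// window, and assert the write is still absent. Sketch of that shape, using
// the writeOne/getCount helpers defined at the top of this file (the final
// count is illustrative):
writeOne( rp.master() );    // write that must NOT reach the old slave
sleep( 10000 );             // longer than a normal replication round-trip
rp.slave().setSlaveOk();
assert.eq( 0, getCount( rp.slave() ) );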
- sleep( 10000 ); - - ports.forEach( function( x ) { stopMongod( x, signal ); } ); - - if ( leftSlave ) { - l = new MongodRunner( ports[ 1 ], "/data/db/" + baseName + "-left", "127.0.0.1:" + ports[ 2 ], "127.0.0.1:" + ports[ 0 ] ); - o = new MongodRunner( ports[ 2 ], "/data/db/" + baseName + "-right", "127.0.0.1:" + ports[ 1 ], "127.0.0.1:" + ports[ 0 ] ); - rp = new ReplPair( l, o, a ); - resetDbpath( "/data/db/" + baseName + "-right" ); - } else { - o = new MongodRunner( ports[ 2 ], "/data/db/" + baseName + "-left", "127.0.0.1:" + ports[ 3 ], "127.0.0.1:" + ports[ 0 ] ); - r = new MongodRunner( ports[ 3 ], "/data/db/" + baseName + "-right", "127.0.0.1:" + ports[ 2 ], "127.0.0.1:" + ports[ 0 ] ); - rp = new ReplPair( o, r, a ); - resetDbpath( "/data/db/" + baseName + "-left" ); - } - - rp.start( true ); - rp.waitForSteadyState( [ 1, 0 ] ); - - rp.slave().setSlaveOk(); - assert.eq( 1, rp.slave().getDB( baseName ).z.find().toArray().length ); - assert.eq( 1, rp.master().getDB( baseName ).z.find().toArray().length ); - - checkWrite( rp.master(), rp.slave() ); - assert.eq( 2, rp.slave().getDB( baseName ).z.find().toArray().length ); - - ports.forEach( function( x ) { stopMongod( x ); } ); - -} - -doTest( 15 ); // SIGTERM -doTest( 9 ); // SIGKILL - -print("replace2Peer finishes"); diff --git a/jstests/repl/snapshot2.js b/jstests/repl/snapshot2.js deleted file mode 100644 index 60b3531..0000000 --- a/jstests/repl/snapshot2.js +++ /dev/null @@ -1,72 +0,0 @@ -// Test SERVER-623 - starting repl peer from a new snapshot of master - -print("snapshot2.js 1 -----------------------------------------------------------"); - -ports = allocatePorts( 3 ); - -var baseName = "repl_snapshot2"; -var basePath = "/data/db/" + baseName; - -a = new MongodRunner( ports[ 0 ], basePath + "-arbiter" ); -l = new MongodRunner( ports[ 1 ], basePath + "-left", "127.0.0.1:" + ports[ 2 ], "127.0.0.1:" + ports[ 0 ] ); -r = new MongodRunner( ports[ 2 ], basePath + "-right", "127.0.0.1:" + ports[ 1 ], "127.0.0.1:" + ports[ 0 ] ); - -print("snapshot2.js 2 -----------------------------------------------------------"); - -rp = new ReplPair(l, r, a); -rp.start(); -print("snapshot2.js 3 -----------------------------------------------------------"); -rp.waitForSteadyState(); - -print("snapshot2.js 4 -----------------------------------------------------------"); - -big = new Array( 2000 ).toString(); // overflow oplog, so test can't pass supriously -rp.slave().setSlaveOk(); -print("snapshot2.js 5 -----------------------------------------------------------"); -for (i = 0; i < 500; ++i) { - rp.master().getDB( baseName )[ baseName ].save( { _id: new ObjectId(), i: i, b: big } ); - if (i % 250 == 249) { - function p() { return i + 1 == rp.slave().getDB(baseName)[baseName].count(); } - try { - assert.soon(p); - } catch (e) { - print("\n\n\nsnapshot2.js\ni+1:" + (i + 1)); - print("slave count:" + rp.slave().getDB(baseName)[baseName].count()); - sleep(2000); - print(p()); - throw (e); - } - sleep( 10 ); // give master a chance to grab a sync point - have such small oplogs the master log might overflow otherwise - } -} -print("snapshot2.js 6 -----------------------------------------------------------"); - -rp.master().getDB( "admin" ).runCommand( {fsync:1,lock:1} ); -leftMaster = ( rp.master().host == rp.left().host ); -rp.killNode( rp.slave() ); -if ( leftMaster ) { - copyDbpath( basePath + "-left", basePath + "-right" ); -} else { - copyDbpath( basePath + "-right", basePath + "-left" ); -} -rp.master().getDB( "admin" 
).$cmd.sys.unlock.findOne(); -rp.killNode( rp.master() ); - -clearRawMongoProgramOutput(); - -rp.right_.extraArgs_ = [ "--fastsync" ]; -rp.left_.extraArgs_ = [ "--fastsync" ]; - -rp.start( true ); -rp.waitForSteadyState(); -assert.eq( 500, rp.master().getDB( baseName )[ baseName ].count() ); -rp.slave().setSlaveOk(); -assert.eq( 500, rp.slave().getDB( baseName )[ baseName ].count() ); -rp.master().getDB( baseName )[ baseName ].save( {i:500} ); -assert.soon( function() { return 501 == rp.slave().getDB( baseName )[ baseName ].count(); } ); - -assert( !rawMongoProgramOutput().match( /resync/ ) ); -assert(!rawMongoProgramOutput().match(/SyncException/)); - -print("snapshot2.js SUCCESS ----------------"); - diff --git a/jstests/repl/snapshot3.js b/jstests/repl/snapshot3.js deleted file mode 100644 index 02955e5..0000000 --- a/jstests/repl/snapshot3.js +++ /dev/null @@ -1,53 +0,0 @@ -// Test SERVER-623 - starting repl peer from a new snapshot of slave - -ports = allocatePorts( 3 ); - -var baseName = "repl_snapshot3"; -var basePath = "/data/db/" + baseName; - -a = new MongodRunner( ports[ 0 ], basePath + "-arbiter" ); -l = new MongodRunner( ports[ 1 ], basePath + "-left", "127.0.0.1:" + ports[ 2 ], "127.0.0.1:" + ports[ 0 ] ); -r = new MongodRunner( ports[ 2 ], basePath + "-right", "127.0.0.1:" + ports[ 1 ], "127.0.0.1:" + ports[ 0 ] ); - -rp = new ReplPair( l, r, a ); -rp.start(); -rp.waitForSteadyState(); - -big = new Array( 2000 ).toString(); // overflow oplog, so test can't pass supriously -rp.slave().setSlaveOk(); -for( i = 0; i < 500; ++i ) { - rp.master().getDB( baseName )[ baseName ].save( { _id: new ObjectId(), i: i, b: big } ); - if ( i % 250 == 249 ) { - assert.soon( function() { return i+1 == rp.slave().getDB( baseName )[ baseName ].count(); } ); - sleep( 10 ); // give master a chance to grab a sync point - have such small oplogs the master log might overflow otherwise - } -} - -rp.slave().getDB( "admin" ).runCommand( {fsync:1,lock:1} ); -leftSlave = ( rp.slave().host == rp.left().host ); -rp.killNode( rp.master() ); -if ( leftSlave ) { - copyDbpath( basePath + "-left", basePath + "-right" ); -} else { - copyDbpath( basePath + "-right", basePath + "-left" ); -} -rp.slave().getDB( "admin" ).$cmd.sys.unlock.findOne(); -rp.killNode( rp.slave() ); - -clearRawMongoProgramOutput(); - -rp.right_.extraArgs_ = [ "--fastsync" ]; -rp.left_.extraArgs_ = [ "--fastsync" ]; - -rp.start( true ); -rp.waitForSteadyState(); -assert.eq( 500, rp.master().getDB( baseName )[ baseName ].count() ); -rp.slave().setSlaveOk(); -assert.eq( 500, rp.slave().getDB( baseName )[ baseName ].count() ); -rp.master().getDB( baseName )[ baseName ].save( {i:500} ); -assert.soon( function() { return 501 == rp.slave().getDB( baseName )[ baseName ].count(); } ); - -assert( !rawMongoProgramOutput().match( new RegExp( "resync.*" + baseName + ".*\n" ) ) , "last1" ); -assert( !rawMongoProgramOutput().match( /SyncException/ ) , "last2" ); - -print("snapshot3.js finishes"); diff --git a/jstests/replsets/auth1.js b/jstests/replsets/auth1.js index 60e4b95..71ab2d9 100644 --- a/jstests/replsets/auth1.js +++ b/jstests/replsets/auth1.js @@ -3,17 +3,27 @@ load("jstests/replsets/rslib.js"); var name = "rs_auth1"; -var port = allocatePorts(4); -var path = "jstests/replsets/"; +var port = allocatePorts(5); +var path = "jstests/libs/"; + + +print("try starting mongod with auth"); +var m = runMongoProgram( "mongod", "--auth", "--port", port[4], "--dbpath", "/data/db/wrong-auth"); + +assert.throws(function() { + 
m.getDB("local").auth("__system", ""); +}); + +stopMongod(port[4]); + - print("reset permissions"); run("chmod", "644", path+"key1"); run("chmod", "644", path+"key2"); print("try starting mongod"); -var m = runMongoProgram( "mongod", "--keyFile", path+"key1", "--port", port[0], "--dbpath", "/data/db/" + name); +m = runMongoProgram( "mongod", "--keyFile", path+"key1", "--port", port[0], "--dbpath", "/data/db/" + name); print("should fail with wrong permissions"); @@ -81,6 +91,10 @@ function doQueryOn(p) { doQueryOn(slave); master.adminCommand({logout:1}); + +print("unauthorized:"); +printjson(master.adminCommand({replSetGetStatus : 1})); + doQueryOn(master); @@ -125,11 +139,12 @@ master.auth("bar", "baz"); for (var i=0; i<1000; i++) { master.foo.insert({x:i, foo : "bar"}); } -master.runCommand({getlasterror:1, w:3, wtimeout:60000}); +var result = master.runCommand({getlasterror:1, w:2, wtimeout:60000}); +printjson(result); print("resync"); -rs.restart(0); +rs.restart(0, {"keyFile" : path+"key1"}); print("add some more data 2"); @@ -159,7 +174,7 @@ master.getSisterDB("admin").auth("foo", "bar"); print("shouldn't ever sync"); -for (var i = 0; i<30; i++) { +for (var i = 0; i<10; i++) { print("iteration: " +i); var results = master.adminCommand({replSetGetStatus:1}); printjson(results); @@ -177,9 +192,15 @@ conn = new MongodRunner(port[3], "/data/db/"+name+"-3", null, null, ["--replSet" conn.start(); wait(function() { + try { var results = master.adminCommand({replSetGetStatus:1}); printjson(results); return results.members[3].state == 2; + } + catch (e) { + print(e); + } + return false; }); print("make sure it has the config, too"); diff --git a/jstests/replsets/cloneDb.js b/jstests/replsets/cloneDb.js index 6d2d0f3..2519c09 100644 --- a/jstests/replsets/cloneDb.js +++ b/jstests/replsets/cloneDb.js @@ -6,34 +6,36 @@ doTest = function( signal ) { var N = 2000 - // ~1KB string + print("~1KB string"); var Text = '' for (var i = 0; i < 40; i++) Text += 'abcdefghijklmnopqrstuvwxyz' - // Create replica set + print("Create replica set"); var repset = new ReplicaSet ('testSet', 3) .begin() var master = repset.getMaster() var db1 = master.getDB('test') - // Insert data + print("Insert data"); for (var i = 0; i < N; i++) { db1['foo'].insert({x: i, text: Text}) db1.getLastError(2) // wait to be copied to at least one secondary } - // Create single server + print("Create single server"); var solo = new Server ('singleTarget') var soloConn = solo.begin() + soloConn.getDB("admin").runCommand({setParameter:1,logLevel:5}); + var db2 = soloConn.getDB('test') - // Clone db from replica set to single server + print("Clone db from replica set to single server"); db2.cloneDatabase (repset.getURL()) - // Confirm clone worked + print("Confirm clone worked"); assert.eq (Text, db2['foo'] .findOne({x: N-1}) ['text'], 'cloneDatabase failed (test1)') - // Now test the reverse direction + print("Now test the reverse direction"); db1 = master.getDB('test2') db2 = soloConn.getDB('test2') for (var i = 0; i < N; i++) { @@ -43,7 +45,7 @@ doTest = function( signal ) { db1.cloneDatabase (solo.host()) assert.eq (Text, db2['foo'] .findOne({x: N-1}) ['text'], 'cloneDatabase failed (test2)') - // Shut down replica set and single server + print("Shut down replica set and single server"); solo.end() repset.stopSet( signal ) } diff --git a/jstests/replsets/config1.js b/jstests/replsets/config1.js deleted file mode 100644 index 748ce8f..0000000 --- a/jstests/replsets/config1.js +++ /dev/null @@ -1,21 +0,0 @@ -doTest = function( signal ) 
{ - var name = 'config1'; - - var replTest = new ReplSetTest( {name: name, nodes: 3} ); - var nodes = replTest.startSet(); - - var config = replTest.getReplSetConfig(); - config.settings = {"heartbeatSleep" : .5, heartbeatTimeout : .8}; - - replTest.initiate(config); - - // Call getMaster to return a reference to the node that's been - // elected master. - var master = replTest.getMaster(); - - config = master.getDB("local").system.replset.findOne(); - assert.eq(config.settings.heartbeatSleep, .5); - assert.eq(config.settings.heartbeatTimeout, .8); -}; - -doTest(15); diff --git a/jstests/replsets/downstream.js b/jstests/replsets/downstream.js new file mode 100755 index 0000000..795e667 --- /dev/null +++ b/jstests/replsets/downstream.js @@ -0,0 +1,36 @@ +// BUG: [SERVER-1768] replica set getlasterror {w: 2} after 2000 +// inserts hangs while secondary servers log "replSet error RS102 too stale to catch up" every once in a while + +function newReplicaSet (name, numServers) { + var rs = new ReplSetTest({name: name, nodes: numServers}) + rs.startSet() + rs.initiate() + rs.awaitReplication() + return rs +} + +function go() { +var N = 2000 + +// ~1KB string +var Text = '' +for (var i = 0; i < 40; i++) + Text += 'abcdefghijklmnopqrstuvwxyz' + +// Create replica set of 3 servers +var repset = newReplicaSet('repset', 3) +var conn = repset.getMaster() +var db = conn.getDB('test') + +// Add data to it +for (var i = 0; i < N; i++) + db['foo'].insert({x: i, text: Text}) + +// wait to be copied to at least one secondary (BUG hangs here) +db.getLastError(2) + +print('getlasterror_w2.js SUCCESS') +} + +// turn off until fixed +//go(); diff --git a/jstests/replsets/fastsync.js b/jstests/replsets/fastsync.js index d7c3905..1c9c215 100644 --- a/jstests/replsets/fastsync.js +++ b/jstests/replsets/fastsync.js @@ -22,7 +22,7 @@ var wait = function(f) { } var reconnect = function(a) { - wait(function() { + wait(function() { try { a.getDB("foo").bar.stats(); return true; @@ -33,7 +33,7 @@ var reconnect = function(a) { }); }; -ports = allocatePorts( 3 ); +ports = allocatePorts( 4 ); var basename = "jstests_fastsync"; var basePath = "/data/db/" + basename; @@ -48,7 +48,7 @@ var admin = p.getDB("admin"); var foo = p.getDB("foo"); var local = p.getDB("local"); -var config = {_id : basename, members : [{_id : 0, host : hostname+":"+ports[0]}]}; +var config = {_id : basename, members : [{_id : 0, host : hostname+":"+ports[0], priority:2}]}; printjson(config); var result = admin.runCommand({replSetInitiate : config}); print("result:"); @@ -59,10 +59,19 @@ while (count < 10 && result.ok != 1) { count++; sleep(2000); result = admin.runCommand({replSetInitiate : config}); -} +} assert(result.ok, tojson(result)); -assert.soon(function() { return admin.runCommand({isMaster:1}).ismaster; }); +assert.soon(function() { result = false; + try { + result = admin.runCommand({isMaster:1}).ismaster; + } + catch(e) { + print(e); + return false; + } + return result; + }); print("1"); for (var i=0; i<100000; i++) { @@ -73,45 +82,113 @@ print("total in foo: "+foo.bar.count()); print("2"); admin.runCommand( {fsync:1,lock:1} ); -copyDbpath( basePath + "-p", basePath + "-s" ); +copyDbpath( basePath + "-p", basePath + "-s"+1 ); admin.$cmd.sys.unlock.findOne(); - print("3"); -var sargs = new MongodRunner( ports[ 1 ], basePath + "-s", false, false, +var startSlave = function(n) { + var sargs = new MongodRunner( ports[ n ], basePath + "-s"+n, false, false, ["--replSet", basename, "--fastsync", "--oplogSize", 2], {no_bind : true} ); -var 
reuseData = true; -sargs.start(reuseData); + var reuseData = true; + var conn = sargs.start(reuseData); + + config = local.system.replset.findOne(); + config.version++; + config.members.push({_id:n, host:hostname+":"+ports[n]}); + + result = admin.runCommand({replSetReconfig : config}); + printjson(result); + assert(result.ok, "reconfig worked"); + reconnect(p); + + print("4"); + var status = admin.runCommand({replSetGetStatus : 1}); + var count = 0; + while (status.members[n].state != 2 && count < 200) { + print("not a secondary yet"); + if (count % 10 == 0) { + printjson(status); + } + assert(!status.members[n].errmsg || !status.members[n].errmsg.match("^initial sync cloning db")); -config = local.system.replset.findOne(); -config.version++; -config.members.push({_id:1, host:hostname+":"+ports[1]}); + sleep(1000); -result = admin.runCommand({replSetReconfig : config}); -assert(result.ok, "reconfig worked"); -reconnect(p); + // disconnection could happen here + try { + status = admin.runCommand({replSetGetStatus : 1}); + } + catch (e) { + print(e); + } + count++; + } -print("4"); -var status = admin.runCommand({replSetGetStatus : 1}); -var count = 0; -while (status.members[1].state != 2 && count < 200) { - print("not a secondary yet"); - if (count % 10 == 0) { - printjson(status); - } - assert(!status.members[1].errmsg || !status.members[1].errmsg.match("^initial sync cloning db")); - - sleep(1000); - - // disconnection could happen here - try { - status = admin.runCommand({replSetGetStatus : 1}); - } - catch (e) { - print(e); - } - count++; + assert.eq(status.members[n].state, 2); + + assert.soon(function() { + return admin.runCommand({isMaster : 1}).ismaster; + }); + + admin.foo.insert({x:1}); + assert.soon(function() { + var last = local.oplog.rs.find().sort({$natural:-1}).limit(1).next(); + var cur = conn.getDB("local").oplog.rs.find().sort({$natural:-1}).limit(1).next(); + print("last: "+tojson(last)+" cur: "+tojson(cur)); + return cur != null && last != null && cur.ts.t == last.ts.t && cur.ts.i == last.ts.i; + }); + + return conn; +}; + +var s1 = startSlave(1); + +var me1 = s1.getDB("local").me.findOne(); + +print("me: " +me1._id); +assert(me1._id != null); + +print("5"); +s1.getDB("admin").runCommand( {fsync:1,lock:1} ); +copyDbpath( basePath + "-s1", basePath + "-s2" ); +s1.getDB("admin").$cmd.sys.unlock.findOne(); + +var s2 = startSlave(2); + +var me2 = s2.getDB("local").me.findOne(); + +print("me: " +me2._id); +assert(me1._id != me2._id); + +print("restart member with a different port and make it a new set"); +try { + p.getDB("admin").runCommand({shutdown:1}); +} +catch(e) { + print("good, shutting down: " +e); } +sleep(10000); + +pargs = new MongodRunner( ports[ 3 ], basePath + "-p", false, false, + ["--replSet", basename, "--oplogSize", 2], + {no_bind : true} ); +p = pargs.start(true); + +printjson(p.getDB("admin").runCommand({replSetGetStatus:1})); + +p.getDB("admin").runCommand({replSetReconfig : { + _id : basename, + members : [{_id:0, host : hostname+":"+ports[3]}] + }, force : true}); + +print("start waiting for primary..."); +assert.soon(function() { + try { + return p.getDB("admin").runCommand({isMaster : 1}).ismaster; + } + catch(e) { + print(e); + } + return false; + }, "waiting for master", 60000); -assert.eq(status.members[1].state, 2); diff --git a/jstests/replsets/initial_sync1.js b/jstests/replsets/initial_sync1.js index df978c4..4cfd606 100644 --- a/jstests/replsets/initial_sync1.js +++ b/jstests/replsets/initial_sync1.js @@ -95,12 +95,11 @@ replTest.stop(1); 
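// The fastsync recipe exercised in fastsync.js above: flush and lock the
// source, copy its data files, unlock, then start the seeded member with
// --fastsync so it skips the initial clone. Condensed sketch using only the
// calls that appear in that test:
admin.runCommand( { fsync: 1, lock: 1 } );         // block writes, flush to disk
copyDbpath( basePath + "-p", basePath + "-s1" );   // snapshot the dbpath
admin.$cmd.sys.unlock.findOne();                   // release the lock
// the copy then starts with ["--replSet", basename, "--fastsync", "--oplogSize", 2]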
print("8. Eventually it should become a secondary"); print("if initial sync has started, this will cause it to fail and sleep for 5 minutes"); -sleep(5*60*1000); wait(function() { var status = admin_s2.runCommand({replSetGetStatus:1}); occasionally(function() { printjson(status); }); return status.members[2].state == 2; - }); + }, 350); print("9. Bring #2 back up"); @@ -122,5 +121,5 @@ for (var i=0; i<10000; i++) { print("11. Everyone happy eventually"); -replTest.awaitReplication(); +replTest.awaitReplication(300000); diff --git a/jstests/replsets/initial_sync3.js b/jstests/replsets/initial_sync3.js index 471aa16..7f2af94 100644 --- a/jstests/replsets/initial_sync3.js +++ b/jstests/replsets/initial_sync3.js @@ -43,14 +43,14 @@ wait(function() { if (!status.members) { return false; } - + for (i=0; i<7; i++) { if (status.members[i].state != 1 && status.members[i].state != 2) { return false; } } return true; - + }); replTest.awaitReplication(); @@ -66,6 +66,7 @@ rs2.initiate(); master = rs2.getMaster(); var config = master.getDB("local").system.replset.findOne(); config.version++; +config.members[0].priority = 2; config.members[0].initialSync = {state : 2}; config.members[1].initialSync = {state : 1}; try { @@ -75,12 +76,34 @@ catch(e) { print("trying to reconfigure: "+e); } -master = rs2.getMaster(); -config = master.getDB("local").system.replset.findOne(); +// wait for a heartbeat, too, just in case sync happens before hb +assert.soon(function() { + try { + for (var n in rs2.nodes) { + if (rs2.nodes[n].getDB("local").system.replset.findOne().version != 2) { + return false; + } + } + } + catch (e) { + return false; + } + return true; +}); + +rs2.awaitReplication(); + +// test partitioning +master = rs2.bridge(); +rs2.partition(0, 2); + +master.getDB("foo").bar.baz.insert({x:1}); +rs2.awaitReplication(); -assert(typeof(config.members[0].initialSync) == "object"); -assert.eq(config.members[0].initialSync.state, 2); -assert.eq(config.members[1].initialSync.state, 1); +master.getDB("foo").bar.baz.insert({x:2}); +var x = master.getDB("foo").runCommand({getLastError : 1, w : 3, wtimeout : 5000}); +printjson(x); +assert.eq(null, x.err); rs2.stopSet(); diff --git a/jstests/replsets/key1 b/jstests/replsets/key1 deleted file mode 100644 index b5c19e4..0000000 --- a/jstests/replsets/key1 +++ /dev/null @@ -1 +0,0 @@ -foop de doop diff --git a/jstests/replsets/key2 b/jstests/replsets/key2 deleted file mode 100644 index cbde821..0000000 --- a/jstests/replsets/key2 +++ /dev/null @@ -1 +0,0 @@ -other key diff --git a/jstests/replsets/maintenance.js b/jstests/replsets/maintenance.js new file mode 100644 index 0000000..5b068cd --- /dev/null +++ b/jstests/replsets/maintenance.js @@ -0,0 +1,32 @@ + + +var replTest = new ReplSetTest( {name: 'unicomplex', nodes: 3} ); +var conns = replTest.startSet(); +replTest.initiate(); + +// Make sure we have a master +var master = replTest.getMaster(); + +for (i=0;i<10000; i++) { master.getDB("bar").foo.insert({x:1,y:i,abc:123,str:"foo bar baz"}); } +for (i=0;i<1000; i++) { master.getDB("bar").foo.update({y:i},{$push :{foo : "barrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrr"}}); } + +replTest.awaitReplication(); + +assert.soon(function() { return conns[2].getDB("admin").isMaster().secondary; }); + +join = startParallelShell( "db.getSisterDB('bar').runCommand({compact : 'foo'});", replTest.ports[2] ); + +print("check secondary goes to recovering"); +assert.soon(function() { return !conns[2].getDB("admin").isMaster().secondary; }); + +print("joining"); 
+join(); + +print("check secondary becomes a secondary again"); +var x = 0; +assert.soon(function() { + var im = conns[2].getDB("admin").isMaster(); + if (x++ % 5 == 0) printjson(im); + return im.secondary; +}); + diff --git a/jstests/replsets/majority.js b/jstests/replsets/majority.js new file mode 100644 index 0000000..6df1a41 --- /dev/null +++ b/jstests/replsets/majority.js @@ -0,0 +1,60 @@ +var num = 5; +var host = getHostName(); +var name = "tags"; +var timeout = 10000; + +var replTest = new ReplSetTest( {name: name, nodes: num, startPort:31000} ); +var nodes = replTest.startSet(); +var port = replTest.ports; +replTest.initiate({_id : name, members : + [ + {_id:0, host : host+":"+port[0], priority : 2}, + {_id:1, host : host+":"+port[1]}, + {_id:2, host : host+":"+port[2]}, + {_id:3, host : host+":"+port[3], arbiterOnly : true}, + {_id:4, host : host+":"+port[4], arbiterOnly : true}, + ], + }); + +replTest.awaitReplication(); +replTest.bridge(); + +var testInsert = function() { + master.getDB("foo").bar.insert({x:1}); + var result = master.getDB("foo").runCommand({getLastError:1, w:"majority", wtimeout:timeout}); + printjson(result); + return result; +}; + +var master = replTest.getMaster(); + +print("get back in the groove"); +testInsert(); +replTest.awaitReplication(); + +print("makes sure majority works"); +assert.eq(testInsert().err, null); + +print("setup: 0,1 | 2,3,4"); +replTest.partition(0,2); +replTest.partition(0,3); +replTest.partition(0,4); +replTest.partition(1,2); +replTest.partition(1,3); +replTest.partition(1,4); + +print("make sure majority doesn't work"); +// primary should now be 2 +master = replTest.getMaster(); +assert.eq(testInsert().err, "timeout"); + +print("bring set back together"); +replTest.unPartition(0,2); +replTest.unPartition(0,3); +replTest.unPartition(1,4); + +master = replTest.getMaster(); + +print("make sure majority works"); +assert.eq(testInsert().err, null); + diff --git a/jstests/replsets/randomcommands1.js b/jstests/replsets/randomcommands1.js deleted file mode 100644 index c451e74..0000000 --- a/jstests/replsets/randomcommands1.js +++ /dev/null @@ -1,29 +0,0 @@ - -replTest = new ReplSetTest( {name: 'randomcommands1', nodes: 3} ); - -nodes = replTest.startSet(); -replTest.initiate(); - -master = replTest.getMaster(); -slaves = replTest.liveNodes.slaves; -printjson(replTest.liveNodes); - -db = master.getDB("foo") -t = db.foo - -ts = slaves.map( function(z){ z.setSlaveOk(); return z.getDB( "foo" ).foo; } ) - -t.save({a: 1000}); -t.ensureIndex( { a : 1 } ) - -db.getLastError( 3 , 30000 ) - -ts.forEach( function(z){ assert.eq( 2 , z.getIndexKeys().length , "A " + z.getMongo() ); } ) - -t.reIndex() - -db.getLastError( 3 , 30000 ) -ts.forEach( function(z){ assert.eq( 2 , z.getIndexKeys().length , "A " + z.getMongo() ); } ) - -replTest.stopSet( 15 ) - diff --git a/jstests/replsets/reconfig.js b/jstests/replsets/reconfig.js new file mode 100644 index 0000000..b7dca03 --- /dev/null +++ b/jstests/replsets/reconfig.js @@ -0,0 +1,69 @@ + +// try reconfiguring with servers down + +var replTest = new ReplSetTest({ name: 'testSet', nodes: 5 }); +var nodes = replTest.startSet(); +replTest.initiate(); + +var master = replTest.getMaster(); + +print("initial sync"); +master.getDB("foo").bar.insert({X:1}); +replTest.awaitReplication(); + +print("stopping 3 & 4"); +replTest.stop(3); +replTest.stop(4); + +print("reconfiguring"); +master = replTest.getMaster(); +var config = master.getDB("local").system.replset.findOne(); +var oldVersion = config.version++; 
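// Note the post-increment above: oldVersion keeps the current version while
// config.version is bumped for the reconfig. replSetReconfig is wrapped in
// try/catch throughout these tests because a primary may close client
// connections while applying a new config. Condensed sketch of the idiom:
var cfg = master.getDB( "local" ).system.replset.findOne();
cfg.version++;
try {
    master.getDB( "admin" ).runCommand( { replSetReconfig: cfg } );
}
catch ( e ) {
    print( e );   // a dropped connection here is expected, not a failure
}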
+config.members[0].votes = 2; +config.members[3].votes = 2; +try { + master.getDB("admin").runCommand({replSetReconfig : config}); +} +catch(e) { + print(e); +} + +var config = master.getDB("local").system.replset.findOne(); +assert.eq(oldVersion+1, config.version); + + +print("0 & 3 up; 1, 2, 4 down"); +replTest.restart(3); + +// in case 0 isn't master +replTest.awaitReplication(); + +replTest.stop(1); +replTest.stop(2); + +print("try to reconfigure with a 'majority' down"); +oldVersion = config.version; +config.version++; +master = replTest.getMaster(); +try { + master.getDB("admin").runCommand({replSetReconfig : config}); +} +catch (e) { + print(e); +} + +var config = master.getDB("local").system.replset.findOne(); +assert.eq(oldVersion+1, config.version); + +replTest.stopSet(); + +replTest2 = new ReplSetTest({name : 'testSet2', nodes : 1}); +nodes = replTest2.startSet(); + +result = nodes[0].getDB("admin").runCommand({replSetInitiate : {_id : "testSet2", members : [ + {_id : 0, tags : ["member0"]} + ]}}); + +assert(result.errmsg.match(/bad or missing host field/)); + +replTest2.stopSet(); diff --git a/jstests/replsets/remove1.js b/jstests/replsets/remove1.js index ebd17d6..f93fe9e 100644 --- a/jstests/replsets/remove1.js +++ b/jstests/replsets/remove1.js @@ -16,7 +16,7 @@ var host = getHostName(); print("Start set with three nodes"); -var replTest = new ReplSetTest( {name: name, nodes: 3} ); +var replTest = new ReplSetTest( {name: name, nodes: 2} ); var nodes = replTest.startSet(); replTest.initiate(); var master = replTest.getMaster(); @@ -28,85 +28,44 @@ master.getDB("foo").bar.baz.insert({x:1}); replTest.awaitReplication(); -print("Remove slave2"); +print("Remove slaves"); var config = replTest.getReplSetConfig(); config.members.pop(); config.version = 2; -try { - master.getDB("admin").runCommand({replSetReconfig:config}); -} -catch(e) { - print(e); -} -reconnect(master); - - -print("Remove slave1"); -config.members.pop(); -config.version = 3; -try { - master.getDB("admin").runCommand({replSetReconfig:config}); -} -catch(e) { - print(e); -} -reconnect(master); - -print("sleeping 1"); -sleep(10000); -// these are already down, but this clears their ports from memory so that they -// can be restarted later -stopMongod(replTest.getPort(1)); -stopMongod(replTest.getPort(2)); - - -print("Bring slave1 back up"); -var paths = [ replTest.getPath(1), replTest.getPath(2) ]; -var ports = allocatePorts(2, replTest.getPort(2)+1); -var args = ["mongod", "--port", ports[0], "--dbpath", paths[0], "--noprealloc", "--smallfiles", "--rest"]; -var conn = startMongoProgram.apply( null, args ); -conn.getDB("local").system.replset.remove(); -printjson(conn.getDB("local").runCommand({getlasterror:1})); -print(conn); -print("sleeping 2"); -sleep(10000); -stopMongod(ports[0]); - -replTest.restart(1); - - -print("Bring slave2 back up"); -args[2] = ports[1]; -args[4] = paths[1]; -conn = startMongoProgram.apply( null, args ); -conn.getDB("local").system.replset.remove(); -print("path: "+paths[1]); -print("sleeping 3"); -sleep(10000); -stopMongod(ports[1]); - -replTest.restart(2); -sleep(10000); - - -print("Add them back as slaves"); +assert.soon(function() { + try { + master.getDB("admin").runCommand({replSetReconfig:config}); + } + catch(e) { + print(e); + } + + reconnect(master); + reconnect(replTest.nodes[1]); + var c = master.getDB("local").system.replset.findOne(); + return c.version == 2; + }); + +print("Add it back as a slave"); config.members.push({_id:1, host : host+":"+replTest.getPort(1)}); 
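// majority.js above drives all of its assertions through one helper: insert,
// then getLastError with w:"majority" and a timeout, returning the result so
// callers can check err == null (replicated to a majority) or err ==
// "timeout" (majority partitioned away). Sketch of that helper, assuming a
// master connection and the 10s timeout used in that test:
var testInsert = function() {
    master.getDB( "foo" ).bar.insert( { x: 1 } );
    var result = master.getDB( "foo" ).runCommand(
        { getLastError: 1, w: "majority", wtimeout: 10000 } );
    printjson( result );
    return result;
};
assert.eq( testInsert().err, null );   // passes only while a majority is up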
-config.members.push({_id:2, host : host+":"+replTest.getPort(2)}); -config.version = 4; +config.version = 3; +printjson(config); wait(function() { try { - master.getDB("admin").runCommand({replSetReconfig:config}); + master.getDB("admin").runCommand({replSetReconfig:config}); } catch(e) { - print(e); + print(e); } reconnect(master); + printjson(master.getDB("admin").runCommand({replSetGetStatus:1})); master.setSlaveOk(); var newConfig = master.getDB("local").system.replset.findOne(); - return newConfig.version == 4; - }); + print( "newConfig: " + tojson(newConfig) ); + return newConfig.version == 3; +} , "wait1" ); print("Make sure everyone's secondary"); @@ -115,18 +74,49 @@ wait(function() { occasionally(function() { printjson(status); }); - - if (!status.members || status.members.length != 3) { + + if (!status.members || status.members.length != 2) { return false; } - for (var i = 0; i<3; i++) { + for (var i = 0; i<2; i++) { if (status.members[i].state != 1 && status.members[i].state != 2) { return false; } } return true; - }); +} , "wait2" ); + + +print("reconfig with minority"); +replTest.stop(1); + +assert.soon(function() { + try { + return master.getDB("admin").runCommand({isMaster : 1}).secondary; + } + catch(e) { + print("trying to get master: "+e); + } +}); + +config.version = 4; +config.members.pop(); +try { + master.getDB("admin").runCommand({replSetReconfig : config, force : true}); +} +catch(e) { + print(e); +} + +reconnect(master); +assert.soon(function() { + return master.getDB("admin").runCommand({isMaster : 1}).ismaster; +}); + +config = master.getDB("local").system.replset.findOne(); +printjson(config); +assert(config.version > 4); replTest.stopSet(); diff --git a/jstests/replsets/replset1.js b/jstests/replsets/replset1.js index 5ac94e7..6387c5d 100644 --- a/jstests/replsets/replset1.js +++ b/jstests/replsets/replset1.js @@ -108,6 +108,28 @@ doTest = function( signal ) { assert.eq( 1000 , count.n , "slave count wrong: " + slave ); }); + // last error + master = replTest.getMaster(); + slaves = replTest.liveNodes.slaves; + printjson(replTest.liveNodes); + + db = master.getDB("foo") + t = db.foo + + ts = slaves.map( function(z){ z.setSlaveOk(); return z.getDB( "foo" ).foo; } ) + + t.save({a: 1000}); + t.ensureIndex( { a : 1 } ) + + db.getLastError( 3 , 30000 ) + + ts.forEach( function(z){ assert.eq( 2 , z.getIndexKeys().length , "A " + z.getMongo() ); } ) + + t.reIndex() + + db.getLastError( 3 , 30000 ) + ts.forEach( function(z){ assert.eq( 2 , z.getIndexKeys().length , "A " + z.getMongo() ); } ) + // Shut down the set and finish the test. replTest.stopSet( signal ); } diff --git a/jstests/replsets/replset3.js b/jstests/replsets/replset3.js index faa0627..ba08eac 100644 --- a/jstests/replsets/replset3.js +++ b/jstests/replsets/replset3.js @@ -29,7 +29,7 @@ doTest = function (signal) { // Step down master. Note: this may close our connection! try { - master.getDB("admin").runCommand({ replSetStepDown: true }); + master.getDB("admin").runCommand({ replSetStepDown: true, force: 1 }); } catch (err) { print("caught: " + err + " on stepdown"); } diff --git a/jstests/replsets/replset5.js b/jstests/replsets/replset5.js index 6a7d8a5..67ce2d7 100644 --- a/jstests/replsets/replset5.js +++ b/jstests/replsets/replset5.js @@ -23,51 +23,63 @@ doTest = function (signal) { master.getDB("barDB").bar.save({ a: 1 }); replTest.awaitReplication(); - // These writes should be replicated immediately - var docNum = 5000; - for(var n=0; n